diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 95329af45011..d7103a7232f7 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -197,5 +197,6 @@ source "drivers/net/ethernet/xircom/Kconfig" source "drivers/net/ethernet/phytium/Kconfig" source "drivers/net/ethernet/guangruntong/Kconfig" source "drivers/net/ethernet/bzwx/Kconfig" +source "drivers/net/ethernet/linkdata/Kconfig" endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index dc909f86f04b..9da5d1e94855 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -109,3 +109,4 @@ obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ obj-$(CONFIG_NET_VENDOR_PHYTIUM) += phytium/ obj-$(CONFIG_NET_VENDOR_GRT) += guangruntong/ obj-$(CONFIG_NET_VENDOR_BZWX) += bzwx/ +obj-$(CONFIG_NET_VENDOR_LINKDATA) += linkdata/ diff --git a/drivers/net/ethernet/linkdata/Kconfig b/drivers/net/ethernet/linkdata/Kconfig new file mode 100644 index 000000000000..74f7bce3cbd2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Linkdata network device configuration +# + +config NET_VENDOR_LINKDATA + bool "Linkdata devices" + default y + help + If you have a network (Ethernet) card from Linkdata, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Linkdata cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_LINKDATA +source "drivers/net/ethernet/linkdata/sxe/Kconfig" +source "drivers/net/ethernet/linkdata/sxevf/Kconfig" +endif # NET_VENDOR_LINKDATA diff --git a/drivers/net/ethernet/linkdata/Makefile b/drivers/net/ethernet/linkdata/Makefile new file mode 100644 index 000000000000..3a33b8f6a4c3 --- /dev/null +++ b/drivers/net/ethernet/linkdata/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linkdata network device drivers. +# +obj-$(CONFIG_SXE) += sxe/ +obj-$(CONFIG_SXE_VF) += sxevf/ diff --git a/drivers/net/ethernet/linkdata/sxe/Kconfig b/drivers/net/ethernet/linkdata/sxe/Kconfig new file mode 100644 index 000000000000..26e56d842d28 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# sxe network device configuration +# + +config SXE + tristate "sxe PCI Express adapters support" + depends on PCI + select MDIO + select PHYLIB + select PTP_1588_CLOCK + help + This driver supports the Linkdata sxe family of PCI Express adapters. + + To compile this driver as a module, choose M here. The module + will be called sxe. 
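[ Note: the hunks above are build plumbing only. As an illustrative sketch (not part of the patch), enabling the symbols introduced here as

  CONFIG_NET_VENDOR_LINKDATA=y
  CONFIG_SXE=m

should make a normal "make modules" descend into drivers/net/ethernet/linkdata/sxe/ and produce sxe.ko, which is why the help text is expected to name the module "sxe" rather than another vendor's driver. ]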
diff --git a/drivers/net/ethernet/linkdata/sxe/Makefile b/drivers/net/ethernet/linkdata/sxe/Makefile new file mode 100644 index 000000000000..b689894084f0 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/Makefile @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: GPL-2.0 +CONFIG_MODULE_SIG=n + +# Path of this Makefile +MAKEPATH := $(abspath $(lastword $(MAKEFILE_LIST))) +# Current directory +CURDIR :=$(shell dirname $(MAKEPATH)) + +ifneq ($(KERNELRELEASE),) +# Build +CONFIG_SXE ?= m +obj-$(CONFIG_SXE) += sxe.o +sxe-objs += $(patsubst %.c, sxepf/%.o, $(notdir $(wildcard $(CURDIR)/sxepf/*.c))) +sxe-objs += $(patsubst %.c, base/trace/%.o, $(notdir $(wildcard $(CURDIR)/base/trace/*.c))) +sxe-objs += $(patsubst %.c, base/log/%.o, $(notdir $(wildcard $(CURDIR)/base/log/*.c))) + +# Add compiler options and macro definitions +ccflags-y += -Werror -Wmaybe-uninitialized -frecord-gcc-switches +ccflags-y += -I$(CURDIR)/sxepf +ccflags-y += -I$(CURDIR)/include/sxe +ccflags-y += -I$(CURDIR)/include +ccflags-y += -I$(CURDIR)/base/compat +ccflags-y += -I$(CURDIR)/base/trace +ccflags-y += -I$(CURDIR)/base/log +ccflags-y += -DSXE_HOST_DRIVER +ccflags-y += -DSXE_DRIVER_RELEASE +ccflags-$(CONFIG_DCB) += -DSXE_DCB_CONFIGURE +ifneq ($(CONFIG_DCA), ) + ccflags-y += -DSXE_TPH_CONFIGURE +endif + +# Generate the linux kernel version code +ifneq ($(wildcard $(CURDIR)/vercode_build.sh),) + KER_DIR=$(srctree) + SPECIFIC_LINUX=$(shell bash $(CURDIR)/vercode_build.sh $(KER_DIR)) + ifneq ($(SPECIFIC_LINUX),) + ccflags-y += -DSPECIFIC_LINUX + ccflags-y += -D$(SPECIFIC_LINUX) + endif +endif + +else # KERNELRELEASE +# Kernel tree path +KDIR := /lib/modules/$(shell uname -r)/build + +all: + @$(MAKE) -C $(KDIR) M=$(CURDIR) modules + +clean: + @rm -rf *.o *.d *.ko Module.* modules.* *.mod* .*.d .*.cmd .tmp_versions *readme.txt + @rm -rf ./sxepf/*.o ./sxepf/.*.cmd + @rm -rf ./base/log/*.o ./base/trace/*.o + +endif # KERNELRELEASE diff --git a/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat.h b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat.h new file mode 100644 index 000000000000..1698929c4fab --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat.h @@ -0,0 +1,28 @@ +#ifndef __SXE_COMPAT_H__ +#define __SXE_COMPAT_H__ + +#include "sxe_compat_gcc.h" + +#include "sxe_compat_inc.h" + +#include "sxe_compat_vercode.h" + +#ifdef SPECIFIC_LINUX +#include "sxe_compat_spec.h" +#elif RHEL_RELEASE_CODE +#include "sxe_compat_rhel.h" +#elif UBUNTU_VERSION_CODE +#include "sxe_compat_ubuntu.h" +#elif OPENEULER_VERSION_CODE +#include "sxe_compat_euler.h" +#elif KYLIN_RELEASE_CODE +#include "sxe_compat_kylin.h" +#elif SUSE_PRODUCT_CODE +#include "sxe_compat_suse.h" +#endif + +#ifndef SXE_KERNEL_MATCHED +#include "sxe_compat_std.h" +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_euler.h b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_euler.h new file mode 100644 index 000000000000..7a810ced458b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_euler.h @@ -0,0 +1,54 @@ +#ifndef __SXE_COMPAT_EULER_H__ +#define __SXE_COMPAT_EULER_H__ + +#if !OPENEULER_VERSION_CODE +#error "OPENEULER_VERSION_CODE is 0 or undefined" +#endif + +#if defined OPENEULER_VERSION_CODE && (OPENEULER_VERSION_CODE == OPENEULER_VERSION(2203,1)) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define 
HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if defined OPENEULER_VERSION_CODE && (OPENEULER_VERSION_CODE == OPENEULER_VERSION(2203,2)) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_gcc.h b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_gcc.h new file mode 100644 index 000000000000..56425964356a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_gcc.h @@ -0,0 +1,14 @@ +#ifndef __SXE_COMPAT_GCC_H__ +#define __SXE_COMPAT_GCC_H__ + +#ifdef __has_attribute +#if __has_attribute(__fallthrough__) +# define fallthrough __attribute__((__fallthrough__)) +#else +# define fallthrough do {} while (0) +#endif +#else +# define fallthrough do {} while (0) +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_inc.h b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_inc.h new file mode 100644 index 000000000000..0acf3dc2a336 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_inc.h @@ -0,0 +1,6 @@ +#ifndef __SXE_COMPAT_INC_H__ +#define __SXE_COMPAT_INC_H__ + +#include + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_kylin.h b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_kylin.h new file mode 100644 index 000000000000..bb245e86966f --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_kylin.h @@ -0,0 +1,77 @@ +#ifndef __SXE_COMPAT_KYLIN_H__ +#define __SXE_COMPAT_KYLIN_H__ + +#if !KYLIN_RELEASE_CODE +#error "KYLIN_RELEASE_CODE is 0 or undefined" +#endif + +#if defined KYLIN_RELEASE_CODE && (KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10,1)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define HAVE_NDO_SET_VF_LINK_STATE +#define SXE_KERNEL_MATCHED +#endif + +#if defined KYLIN_RELEASE_CODE && (KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10,3)) +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define 
XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define HAVE_AF_XDP_ZERO_COPY +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC + +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#if defined KYLIN_RELEASE_CODE && (KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10,4)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define XSK_BUFF_DMA_SYNC_API_NEED_1_PARAM +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_overflow.h b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_overflow.h new file mode 100644 index 000000000000..39ed8f073a34 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_overflow.h @@ -0,0 +1,178 @@ + +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include + +#define _kc_is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - _kc_is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; 
\ +}) +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + + +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + +#endif + +#define check_shl_overflow(a, s, d) ({ \ + typeof(a) _a = a; \ + typeof(s) _s = s; \ + typeof(d) _d = d; \ + u64 _a_full = _a; \ + unsigned int _to_shift = \ + _s >= 0 && _s < 8 * sizeof(*d) ? 
_s : 0; \ + *_d = (_a_full << _to_shift); \ + (_to_shift != _s || *_d < 0 || _a < 0 || \ + (*_d >> _to_shift) != _a); \ +}) + +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +#define struct_size(p, member, n) \ + __ab_c_size(n, \ + sizeof(*(p)->member) + __must_be_array((p)->member),\ + sizeof(*(p))) + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_rhel.h b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_rhel.h new file mode 100644 index 000000000000..e4ace7925bca --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_rhel.h @@ -0,0 +1,377 @@ +#ifndef __SXE_COMPAT_RHEL_H__ +#define __SXE_COMPAT_RHEL_H__ + +#if !RHEL_RELEASE_CODE +#error "RHEL_RELEASE_CODE is 0 or undefined" +#endif + +#ifndef RHEL_RELEASE_VERSION +#error "RHEL_RELEASE_VERSION is undefined" +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7,6)) +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif +#define HAVE_RHEL7_GSO_FEATURE +#define DCBNL_OPS_GETAPP_RETURN_U8 +#define HAVE_DMA_ATTRS_STRUCT +#define HAVE_NET_DEVICE_EXTENDED +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define NEED_BOOTTIME_SECONDS +#define HAVE_NDO_SET_VF_LINK_STATE + +#define netdev_xmit_more() (skb->xmit_more) +#define NOT_INCLUDE_SCTP_H +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7,7)) +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_RHEL7_GSO_FEATURE +#define DCBNL_OPS_GETAPP_RETURN_U8 +#define HAVE_DMA_ATTRS_STRUCT +#define HAVE_NET_DEVICE_EXTENDED +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define NEED_BOOTTIME_SECONDS +#define HAVE_NDO_SET_VF_LINK_STATE + +#define netdev_xmit_more() (skb->xmit_more) +#define NOT_INCLUDE_SCTP_H +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7,8)) +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_RHEL7_GSO_FEATURE +#define DCBNL_OPS_GETAPP_RETURN_U8 +#define HAVE_DMA_ATTRS_STRUCT +#define HAVE_NET_DEVICE_EXTENDED +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define NEED_BOOTTIME_SECONDS +#define HAVE_NDO_SET_VF_LINK_STATE + +#define netdev_xmit_more() (skb->xmit_more) +#define 
NOT_INCLUDE_SCTP_H +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7,9)) +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_RHEL7_GSO_FEATURE +#define DCBNL_OPS_GETAPP_RETURN_U8 +#define HAVE_DMA_ATTRS_STRUCT +#define HAVE_NET_DEVICE_EXTENDED +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define NEED_BOOTTIME_SECONDS +#define HAVE_NDO_SET_VF_LINK_STATE + +#define netdev_xmit_more() (skb->xmit_more) + +#define NOT_INCLUDE_SCTP_H +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define HAVE_NDO_SET_VF_LINK_STATE +#define NDO_SET_FEATURES_RTN_0 +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define netdev_xmit_more() (skb->xmit_more) +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,1)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_AF_XDP_ZERO_COPY +#define XSK_UMEM_CONSUME_TX_NEED_3_PARAMS +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define netdev_xmit_more() (skb->xmit_more) +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,2)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,3)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define xsk_umem_discard_addr xsk_umem_release_addr +#define xsk_umem_discard_addr_rq xsk_umem_release_addr_rq +#define 
xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,4)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define XSK_BUFF_DMA_SYNC_API_NEED_1_PARAM +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,5)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,6)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,7)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP 
+#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define BPF_WARN_INVALID_XDP_ACTION_API_NEED_3_PARAMS +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(9,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(9,1)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_NDO_SET_VF_LINK_STATE +#define BPF_WARN_INVALID_XDP_ACTION_API_NEED_3_PARAMS +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(9,2)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_NDO_SET_VF_LINK_STATE +#define BPF_WARN_INVALID_XDP_ACTION_API_NEED_3_PARAMS +#define NETIF_NAPI_ADD_API_NEED_3_PARAMS +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_spec.h b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_spec.h new file mode 100644 index 000000000000..a91964415e65 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_spec.h @@ -0,0 +1,310 @@ +#ifndef __SXE_COMPAT_SPEC_H__ +#define __SXE_COMPAT_SPEC_H__ + +#ifndef SPECIFIC_LINUX +#error "SPECIFIC_LINUX is undefined" +#endif + +#ifdef NFS_4_0_0613 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define 
NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define netdev_xmit_more() (skb->xmit_more) +#define SXE_KERNEL_MATCHED +#endif + +#ifdef NFS_4_0_0612 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define netdev_xmit_more() (skb->xmit_more) +#define SXE_KERNEL_MATCHED +#endif + +#ifdef UOS_1050 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define NETDEV_XMIT_MORE_WORK_AROUND +#define SXE_KERNEL_MATCHED +#endif + +#ifdef UOS_1060_4_19 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define NETDEV_XMIT_MORE_WORK_AROUND +#define SXE_KERNEL_MATCHED +#endif + +#ifdef UOS_1060_5_10 +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#ifdef UOS_1070_4_19 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define SXE_KERNEL_MATCHED +#endif + +#ifdef UOS_1070_5_10 +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_SKB_CSUM_SCTP_API +#define 
HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#ifdef CULINUX_3_0 +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#ifdef KYLIN_10_SP2 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define NETDEV_XMIT_MORE_WORK_AROUND +#define SXE_KERNEL_MATCHED +#endif + +#ifdef KYLIN_0429 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define NETDEV_XMIT_MORE_WORK_AROUND +#define SXE_KERNEL_MATCHED +#endif + +#ifdef KYLIN_0721 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define HAVE_NETDEV_NESTED_PRIV +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define NETDEV_XMIT_MORE_WORK_AROUND +#define SXE_KERNEL_MATCHED +#endif + +#ifdef ANOLIS_8_8 +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#ifdef EULER_2203_LTS +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT 
+#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#ifdef BCLINUX_8_2_4_19 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define xsk_umem_discard_addr xsk_umem_release_addr +#define xsk_umem_discard_addr_rq xsk_umem_release_addr_rq +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#ifdef BCLINUX_8_2_5_10 +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_std.h b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_std.h new file mode 100644 index 000000000000..cf8727f755d5 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_std.h @@ -0,0 +1,550 @@ +#ifndef __SXE_COMPAT_STD_H__ +#define __SXE_COMPAT_STD_H__ + +#ifndef LINUX_VERSION_CODE +#error "LINUX_VERSION_CODE is undefined" +#endif + +#ifndef KERNEL_VERSION +#error "KERNEL_VERSION is undefined" +#endif + +#ifdef SXE_KERNEL_MATCHED +#error "SXE_KERNEL_MATCHED is defined" +#endif + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#else +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,6))) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif +#endif +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(9,1))) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif + +#if (OPENEULER_VERSION_CODE && (OPENEULER_VERSION_CODE >= OPENEULER_VERSION(2203,1))) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif + +#if (SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE > SUSE_PRODUCT(1,15,2,0))) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif +#endif + +#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) +#undef XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#endif +#else +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,5))) +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)) +#define HAVE_XDP_BUFF_DATA_META +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#define HAVE_XDP_BUFF_FRAME_SIZE +#endif +#if (KYLIN_RELEASE_CODE && (KYLIN_RELEASE_CODE >= KYLIN_RELEASE_VERSION(10,3))) +#define HAVE_XDP_BUFF_FRAME_SIZE +#endif +#else +#define HAVE_XDP_BUFF_FRAME_SIZE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,12,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) +#define HAVE_XDP_BUFF_INIT_API +#endif +#else +#define HAVE_XDP_BUFF_INIT_API +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,12,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) +#define HAVE_XDP_PREPARE_BUFF_API +#endif +#else +#define HAVE_XDP_PREPARE_BUFF_API +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) +#define HAVE_NETDEV_NESTED_PRIV +#endif +#else +#define HAVE_NETDEV_NESTED_PRIV +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_TIMEOUT_TXQUEUE_IDX +#endif +#else +#define HAVE_TIMEOUT_TXQUEUE_IDX +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#define HAVE_NET_PREFETCH_API +#endif +#else +#define HAVE_NET_PREFETCH_API +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_FDB_ADD_EXTACK +#endif +#else +#define HAVE_NDO_FDB_ADD_EXTACK +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif +#else +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) +#define HAVE_NDO_SET_VF_LINK_STATE +#endif +#else +#define HAVE_NDO_SET_VF_LINK_STATE +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))) +#undef ETH_GET_HEADLEN_API_NEED_2_PARAM +#endif + +#if (KYLIN_RELEASE_CODE && (KYLIN_RELEASE_CODE >= KYLIN_RELEASE_VERSION(10,3))) +#undef ETH_GET_HEADLEN_API_NEED_2_PARAM +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_OFF_API +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,14,241) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#undef NEED_SKB_FRAG_OFF_API +#endif +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,19,200) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#undef NEED_SKB_FRAG_OFF_API +#endif + +#if 
(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) +#undef NEED_SKB_FRAG_OFF_API +#undef NEED_SKB_FRAG_OFF_ADD_API +#endif + +#if (UBUNTU_VERSION_CODE && (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,15,0,159) && \ + UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,15,0,999))) +#undef NEED_SKB_FRAG_OFF_API +#endif + +#if (SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE >= SUSE_PRODUCT(1,15,2,0))) +#undef NEED_SKB_FRAG_OFF_API +#undef NEED_SKB_FRAG_OFF_ADD_API +#endif + +#if (KYLIN_RELEASE_CODE && (KYLIN_RELEASE_CODE >= KYLIN_RELEASE_VERSION(10,3))) +#undef NEED_SKB_FRAG_OFF_API +#undef NEED_SKB_FRAG_OFF_ADD_API +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#ifndef ETH_P_LLDP +#define ETH_P_LLDP 0x88CC +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(9,0)) +#define HAVE_NDO_ETH_IOCTL +#endif +#else +#define HAVE_NDO_ETH_IOCTL +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,1,0)) +#define NETIF_NAPI_ADD_API_NEED_3_PARAMS +#else +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(9,2)) +#define NETIF_NAPI_ADD_API_NEED_3_PARAMS +#endif +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_SKB_XMIT_MORE +#endif +#else +#define HAVE_SKB_XMIT_MORE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#ifdef HAVE_SKB_XMIT_MORE +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))) +#define netdev_xmit_more() (skb->xmit_more) +#endif +#else +#define netdev_xmit_more() (0) +#endif +#endif + +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) +#define DCBNL_OPS_GETAPP_RETURN_U8 +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_DMA_ATTRS_STRUCT +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_NET_DEVICE_EXTENDED +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) +#ifndef skb_frag_size +#define NEED_SKB_FRAG_SIZE_API +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define NEED_BOOTTIME_SECONDS +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) +#define NOT_INCLUDE_SCTP_H +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)) +#define HAVE_XDP_SUPPORT +#endif + +#ifdef HAVE_XDP_SUPPORT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0)) +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#define HAVE_XDP_QUERY_PROG +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#undef HAVE_XDP_QUERY_PROG +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,17,0)) +#define BPF_WARN_INVALID_XDP_ACTION_API_NEED_3_PARAMS +#else +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,7))) +#if (RHEL_RELEASE_CODE != RHEL_RELEASE_VERSION(9,0)) +#define BPF_WARN_INVALID_XDP_ACTION_API_NEED_3_PARAMS +#endif +#endif +#endif + +#endif + +#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(5,17,0)) +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#else +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,7))) +#if RHEL_RELEASE_CODE != RHEL_RELEASE_VERSION(9,0) +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif +#endif + +#if (OPENEULER_VERSION_CODE && (OPENEULER_VERSION_CODE >= OPENEULER_VERSION(2203,1))) +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_AF_XDP_ZERO_COPY +#endif +#else +#define HAVE_AF_XDP_ZERO_COPY +#endif + +#ifdef HAVE_AF_XDP_ZERO_COPY +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif +#else +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#endif +#else +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +#if (SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE >= SUSE_PRODUCT(1,15,2,0))) +#define HAVE_NDO_XSK_WAKEUP +#endif +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_NDO_XSK_WAKEUP +#endif +#else +#define HAVE_NDO_XSK_WAKEUP +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#define XSK_BUFF_DMA_SYNC_API_NEED_1_PARAM +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5)) +#undef XSK_BUFF_DMA_SYNC_API_NEED_1_PARAM +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#define NEED_XSK_BUFF_POOL_RENAME +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5)) +#undef NEED_XSK_BUFF_POOL_RENAME +#endif +#endif + +#ifdef NEED_XSK_BUFF_POOL_RENAME +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define xsk_get_pool_from_qid xdp_get_umem_from_qid +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_tx_peek_desc xsk_umem_consume_tx +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#define XSK_UMEM_CONSUME_TX_NEED_3_PARAMS +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,1)) +#undef XSK_UMEM_CONSUME_TX_NEED_3_PARAMS +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) +#define xsk_umem_discard_addr xsk_umem_release_addr +#define xsk_umem_discard_addr_rq xsk_umem_release_addr_rq +#else +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,3)) +#define xsk_umem_discard_addr xsk_umem_release_addr +#define xsk_umem_discard_addr_rq xsk_umem_release_addr_rq +#endif +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,4,0)) +#define CLASS_CREATE_NEED_1_PARAM +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,4,0)) +#define DEFINE_SEMAPHORE_NEED_CNT +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,6,0)) +#define DELETE_PCIE_ERROR_REPORTING +#endif + +#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)) +#define HAVE_ETH_HW_ADDR_SET_API +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) +#define HAVE_PTP_CLOCK_INFO_ADJFINE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#define HAVE_NO_XDP_BUFF_RXQ +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define HAVE_NO_OVERFLOW_H +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); + + __free_pages(page, compound_order(page)); +} +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#define HAVE_NO_SWIOTLB_SKIP_CPU_SYNC +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) +#undef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define xdp_buff _kc_xdp_buff +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#define NO_VOID_NDO_GET_STATS64 +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#undef NO_VOID_NDO_GET_STATS64 +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#define NO_NETDEVICE_MIN_MAX_MTU +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#undef NO_NETDEVICE_MIN_MAX_MTU +#endif +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#if (!RHEL_RELEASE_CODE) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +static inline bool _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) +{ + napi_complete(napi); + + return true; +} + +#ifdef napi_complete_done +#undef napi_complete_done +#endif +#define napi_complete_done _kc_napi_complete_done +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#define HAVE_NO_PCIE_FLR +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#define HAVE_NO_HWTSTAMP_FILTER_NTP_ALL +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#define NO_NEED_SIGNAL_H +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define HAVE_NO_MACVLAN_DEST_FILTER +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define HAVE_NO_SB_BIND_CHANNEL +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define HAVE_NO_MACVLAN_RELEASE +#endif + +#if 
(LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define NEED_SET_MACVLAN_MODE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define NO_NEED_POOL_DEFRAG +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#define HAVE_NO_WALK_UPPER_DEV +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_suse.h b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_suse.h new file mode 100644 index 000000000000..61c2906f6d41 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_suse.h @@ -0,0 +1,57 @@ +#ifndef __SXE_COMPAT_SUSE_H__ +#define __SXE_COMPAT_SUSE_H__ + +#if !CONFIG_SUSE_KERNEL +#error "CONFIG_SUSE_KERNEL is 0 or undefined" +#endif + +#if !SUSE_PRODUCT_CODE +#error "SUSE_PRODUCT_CODE is 0 or undefined" +#endif + +#if defined SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE == SUSE_PRODUCT(1,15,2,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XSK_UMEM_ADJUST_OFFSET +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define SXE_KERNEL_MATCHED +#endif + +#if defined SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE == SUSE_PRODUCT(1,15,4,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_ubuntu.h b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_ubuntu.h new file mode 100644 index 000000000000..e933f6cecf62 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_ubuntu.h @@ -0,0 +1,83 @@ +#ifndef __SXE_COMPAT_UBUNTU_H__ +#define __SXE_COMPAT_UBUNTU_H__ + +#if !UTS_UBUNTU_RELEASE_ABI +#error "UTS_UBUNTU_RELEASE_ABI is 0 or undefined" +#endif + +#if !UBUNTU_VERSION_CODE +#error "UBUNTU_VERSION_CODE is 0 or undefined" +#endif + +#ifndef UBUNTU_VERSION +#error "UBUNTU_VERSION is undefined" +#endif + +#if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,4,0,0)) && \ + (UBUNTU_VERSION_CODE < UBUNTU_VERSION(5,5,0,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define 
XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define SXE_KERNEL_MATCHED +#endif + +#if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,11,0,0)) && \ + (UBUNTU_VERSION_CODE < UBUNTU_VERSION(5,12,0,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,15,0,0)) && \ + (UBUNTU_VERSION_CODE < UBUNTU_VERSION(5,16,0,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_vercode.h b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_vercode.h new file mode 100644 index 000000000000..2794ce11b799 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/compat/sxe_compat_vercode.h @@ -0,0 +1,61 @@ +#ifndef __SXE_COMPAT_VERCODE_H__ +#define __SXE_COMPAT_VERCODE_H__ + + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +#ifndef UTS_RELEASE +#include +#endif + +#ifndef RHEL_RELEASE_CODE +#define RHEL_RELEASE_CODE 0 +#endif +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#define UBUNTU_VERSION(a,b,c,d) (((a) << 24) + ((b) << 16) + (d)) + +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else +#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ + UTS_UBUNTU_RELEASE_ABI) +#if UTS_UBUNTU_RELEASE_ABI > 65535 +#error UTS_UBUNTU_RELEASE_ABI is larger than 65535... 
+#endif +#endif + +#ifndef OPENEULER_VERSION_CODE +#define OPENEULER_VERSION_CODE 0 +#endif +#ifndef OPENEULER_VERSION +#define OPENEULER_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef KYLIN_RELEASE_CODE +#define KYLIN_RELEASE_CODE 0 +#endif +#ifndef KYLIN_RELEASE_VERSION +#define KYLIN_RELEASE_VERSION(a,b) ((a << 8) + b) +#endif + +#ifdef CONFIG_SUSE_KERNEL +#include +#endif +#ifndef SUSE_PRODUCT_CODE +#define SUSE_PRODUCT_CODE 0 +#endif +#ifndef SUSE_PRODUCT +#define SUSE_PRODUCT(product, version, patchlevel, auxrelease) \ + (((product) << 24) + ((version) << 16) + \ + ((patchlevel) << 8) + (auxrelease)) +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/base/log/sxe_log.c b/drivers/net/ethernet/linkdata/sxe/base/log/sxe_log.c new file mode 100644 index 000000000000..1b5f889adbb9 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/log/sxe_log.c @@ -0,0 +1,1128 @@ +#include +#include +#include +#include +#include +#include +#include "sxe_log.h" +#include "sxe_compat.h" + +#if (defined SXE_DRIVER_DEBUG && defined __KERNEL__) || (defined SXE_DRIVER_TRACE) + +int time_for_file_name(char *buff, int buf_len) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) + struct timeval tv; + struct tm td; + + do_gettimeofday(&tv); + time_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest*60, &td); +#else + struct timespec64 tv; + struct tm td; + ktime_get_real_ts64(&tv); + time64_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest*60, &td); +#endif + return snprintf(buff, buf_len, "%04ld-%02d-%02d_%02d:%02d:%02d", + td.tm_year + 1900, td.tm_mon + 1, td.tm_mday, + td.tm_hour, td.tm_min, td.tm_sec); +} + +int sxe_file_write(struct file *file, char *buf, int len) +{ + int ret = 0; + + void *journal; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) + mm_segment_t old_fs; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0) + old_fs = get_fs(); + set_fs(get_ds()); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0) + old_fs = get_fs(); + set_fs(KERNEL_DS); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0) +#else + old_fs = force_uaccess_begin(); +#endif + + journal = current->journal_info; + current->journal_info = NULL; + + if (!file){ + return 0; + } + + do{ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(4,10,0) + ret = file->f_op->write(file, buf, len, &file->f_pos); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + ret = vfs_write(file, buf, len, &file->f_pos); +#else + ret = kernel_write(file, buf, len, &file->f_pos); +#endif + }while( ret == -EINTR ); + + if (ret >= 0) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) + fsnotify_modify(file); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) + if ( file->f_path.dentry) { + fsnotify_modify(file->f_path.dentry); + } +#endif + } + + current->journal_info = journal; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0) + set_fs(old_fs); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0) +#else + force_uaccess_end(old_fs); +#endif + + return ret; +} +#endif + +#if defined SXE_DRIVER_DEBUG && defined __KERNEL__ + +#define FILE_NAME_SIZE 128 +#define SXE_KLOG_OUT_WAIT (5 * HZ) +#define SWITCH_FILE +#define LOG_PATH_LEN 100 +#define DRV_LOG_FILE_SIZE_MIN_MB 10 +#define DRV_LOG_FILE_SIZE_MAX_MB 200 + +sxe_debug_t g_sxe_debug; +char g_log_path_str[LOG_PATH_LEN] = {0}; +char g_log_path_bin[LOG_PATH_LEN] = {0}; + +static char g_log_path[80] = {0}; +module_param_string(g_log_path, g_log_path, 80, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(g_log_path, "the path host driver will be saved(<80 chars) Default: /var/log"); + +static 
u32 g_log_file_size = 200; +module_param(g_log_file_size, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(g_log_file_size, + "single driver log file size(10MB ~ 200MB), Default: 200, Unit: MB"); + +static u32 g_log_space_size = 0; +module_param(g_log_space_size, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(g_log_space_size, + "the space allowed host driver log to be store, Default: 0(unlimited), Unit: MB"); + +static u32 g_log_tty = 0; +module_param(g_log_tty, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(g_log_tty, + "allow driver log(ERROR, WARN, INFO) output to tty console, Default: 0(not allowed)"); + +static inline int time_for_log(char *buff, int buf_len) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) + struct timeval tv; + struct tm td; + + do_gettimeofday(&tv); + time_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest*60, &td); + + return snprintf(buff, buf_len, "[%04ld-%02d-%02d;%02d:%02d:%02d.%ld]", + td.tm_year + 1900, + td.tm_mon + 1, td.tm_mday, td.tm_hour, + td.tm_min, td.tm_sec, tv.tv_usec); +#else + struct timespec64 tv; + struct tm td; + ktime_get_real_ts64(&tv); + time64_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest*60, &td); + return snprintf(buff, buf_len, "[%04ld-%02d-%02d;%02d:%02d:%02d.%ld]", + td.tm_year + 1900, + td.tm_mon + 1, td.tm_mday, td.tm_hour, + td.tm_min, td.tm_sec, tv.tv_nsec*1000); +#endif +} + +static inline char *sxe_stack_top(void) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + ULong *ptr = (ULong*)(current->thread_info + 1); +#else + ULong *ptr = (ULong*)(task_thread_info(current) + 1 ); +#endif + return (char*)(ptr + 1); +} + +static inline sxe_thread_local_t *sxe_thread_local_get(sxe_thread_key_t * key) +{ + return (sxe_thread_local_t*)(sxe_stack_top() + key->offset); +} + +void sxe_thread_key_create(int size, sxe_thread_key_t *key) +{ + key->offset = g_sxe_debug.key_offset; + g_sxe_debug.key_offset += sizeof(sxe_thread_local_t) + size; +} + +void *sxe_thread_get_specific(sxe_thread_key_t *key) +{ + sxe_thread_local_t *local = sxe_thread_local_get(key); + if (local->magic != DEBUG_TRACE_MAGIC) + { + return NULL; + } + return (void*)local->data; +} + +void sxe_thread_clear_specific(sxe_thread_key_t *key) +{ + sxe_thread_local_t *local = sxe_thread_local_get(key); + local->magic = 0; +} + +int sxe_filter_file_add(char *name) +{ + debug_file_t *file = NULL; + + file = (debug_file_t*)kmalloc(sizeof(debug_file_t), GFP_ATOMIC); + if (!file){ + sxe_print(KERN_ERR, NULL, "kmalloc size %lu failed\n", PAGE_SIZE); + return -ENOMEM; + } + strncpy(file->name, name, sizeof(file->name)); + INIT_LIST_HEAD(&file->list); + + list_add_rcu(&file->list, &g_sxe_debug.filter_file); + return 0; +} + +void sxe_filter_file_del(char *filename) +{ + debug_file_t *file = NULL; + + list_for_each_entry_rcu(file, &g_sxe_debug.filter_file, list){ + if(!strcmp(file->name, filename)){ + list_del_rcu(&file->list); + synchronize_rcu(); + kfree(file); + return; + } + } + return; +} + +void sxe_log_level_modify(u32 level) +{ + sxe_level_set(level); +} + +char* sxe_log_path_query(void) +{ +#ifndef __cplusplus + return g_log_path; +#else + return NULL; +#endif +} + +u32 sxe_log_space_size_query(void) +{ + return g_log_space_size; +} + +u32 sxe_log_file_size_query(void) +{ + return g_log_file_size; +} + +void sxe_log_file_size_modify(u32 size) +{ + g_log_file_size = size; +} + +u32 sxe_log_tty_query(void) +{ + return g_log_tty; +} + +#ifndef SXE_CFG_RELEASE +static inline int sxe_filter_file_print(const char *filename) +{ + debug_file_t *file; + rcu_read_lock(); + list_for_each_entry_rcu(file, 
&g_sxe_debug.filter_file, list){ + if(!strcmp(file->name, filename)){ + rcu_read_unlock(); + return 1; + } + } + rcu_read_unlock(); + return 0; +} + +static inline int sxe_filter_func_print(const char *name) +{ + debug_func_t *func; + + rcu_read_lock(); + list_for_each_entry_rcu(func, &g_sxe_debug.filter_func, list){ + if(!strcmp(func->name, name)){ + rcu_read_unlock(); + return 1; + } + } + rcu_read_unlock(); + return 0; +} + +#endif +void sxe_filter_file_clear(void) +{ + debug_file_t *file = NULL; + + do{ + file = list_first_or_null_rcu( + &g_sxe_debug.filter_file, + debug_file_t, + list); + if (file){ + list_del_rcu(&file->list); + synchronize_rcu(); + kfree(file); + } + }while(file); + + return; +} + +int sxe_filter_func_add(char *name) +{ + debug_func_t *func = NULL; + + func = (debug_func_t *)kmalloc(sizeof(debug_func_t), GFP_ATOMIC); + if (!func){ + sxe_print(KERN_ERR,NULL, "kmalloc size %lu failed\n", PAGE_SIZE); + return -ENOMEM; + } + strncpy(func->name, name, sizeof(func->name)); + INIT_LIST_HEAD(&func->list); + + list_add_rcu(&func->list, &g_sxe_debug.filter_func); + return 0; +} + +void sxe_filter_func_del(char *name) +{ + debug_func_t *func = NULL; + + list_for_each_entry_rcu(func, &g_sxe_debug.filter_func, list){ + if(!strcmp(func->name, name)){ + list_del_rcu(&func->list); + synchronize_rcu(); + kfree(func); + return; + } + } + return; +} + +void sxe_filter_func_clear(void) +{ + debug_func_t *func = NULL; + + do{ + func = list_first_or_null_rcu( + &g_sxe_debug.filter_func, + debug_func_t, + list); + if (func){ + list_del_rcu(&func->list); + synchronize_rcu(); + kfree(func); + } + }while(func); + + return; +} + +static void sxe_file_close(struct file **file) +{ + filp_close(*file, NULL); + *file = NULL; +} + +static int sxe_file_open(sxe_log_t *log, struct file **pp_file) +{ + struct file *file; + int flags_new = O_CREAT | O_RDWR | O_APPEND | O_LARGEFILE; + int flags_rewrite = O_CREAT | O_RDWR | O_LARGEFILE | O_TRUNC; + int err = 0; + int len = 0; + char filename[FILE_NAME_SIZE]; + +#ifdef SWITCH_FILE + memset(filename, 0, FILE_NAME_SIZE); + len += snprintf(filename, PAGE_SIZE, "%s", log->file_path); + if (log->file_num == 0) { + time_for_file_name(filename + len, FILE_NAME_SIZE - len); + } else { + snprintf(filename + len, FILE_NAME_SIZE - len, "%04d", log->index++); + log->index = log->index % log->file_num; + } + + if(log->file_num == 1 && log->file != NULL) { + sxe_file_close(&log->file); + log->file_pos = 0; + } +#else + memset(filename, 0, FILE_NAME_SIZE); + strncpy(filename, path, FILE_NAME_SIZE); +#endif + if (log->file_num == 0) { + file = filp_open(filename, flags_new, 0666); + } else { + file = filp_open(filename, flags_rewrite, 0666); + if (IS_ERR(file)) { + err = (int)PTR_ERR(file); + if (err == -ENOENT) { + file = filp_open(filename, flags_new, 0666); + } + } + } + if (IS_ERR(file)){ + err = (int)PTR_ERR(file); + sxe_print(KERN_ERR, NULL, "open file:%s failed[errno:%d]\n", filename, err); + goto l_out; + } + mapping_set_gfp_mask(file->f_path.dentry->d_inode->i_mapping, GFP_NOFS); + + sxe_print(,NULL,"redirect file %s\n", filename); + + *pp_file = file; + +l_out: + return err; +} + +static void sxe_file_sync(struct file *file) +{ + struct address_space *mapping; + void *journal; + int ret = 0; + int err; + + (void)ret; + (void)err; + + if( !file || !file->f_op || !file->f_op->fsync ){ + goto l_end; + } + + journal = current->journal_info; + current->journal_info = NULL; + + mapping = file->f_mapping; + + ret = filemap_fdatawrite(mapping); + +#if 
LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + mutex_lock(&mapping->host->i_mutex); + err = file->f_op->fsync(file, file->f_path.dentry, 1); + if( !ret ){ + ret = err; + } + mutex_unlock(&mapping->host->i_mutex); + err = filemap_fdatawait(mapping); + if( !ret ){ + ret = err; + } + +#else + err = file->f_op->fsync(file, 0, file->f_mapping->host->i_size, 1); +#endif + + current->journal_info = journal; + +l_end: + return; +} + +static void sxe_klog_in(sxe_log_t *log, char *buf, const int len) +{ + int begin = 0; + int end = 0; + int free_size; + ULong flags; + + spin_lock_irqsave(&log->lock, flags); + + if (log->head > log->tail) { + sxe_print(KERN_WARNING, NULL, "FAILURE: log head exceeds log tail\n"); + SXE_BUG_NO_SYNC(); + } + + free_size = log->buf_size - (log->tail - log->head); + + if (free_size <= len){ + log->is_drop = 1; + spin_unlock_irqrestore(&log->lock, flags); + return; + } + + begin = log->tail % log->buf_size; + end = (log->tail + len) % log->buf_size; + + if (begin < end){ + memcpy(log->buf + begin, buf, len); + } + else{ + memcpy(log->buf + begin, buf, log->buf_size - begin); + memcpy(log->buf, buf + log->buf_size - begin, end); + } + + log->tail = log->tail + len; + + spin_unlock_irqrestore(&log->lock, flags); + + return; +} + +static void sxe_klog_out(sxe_log_t *log) +{ + int len = 0; + int rc = 0; + long long tail; + int begin; + int end; + int schedule_count_th = 0; + const int max_loop = 4096; + +#ifdef SWITCH_FILE + struct file *file = NULL; +#endif + + if (log->file == NULL) { + rc = sxe_file_open(log, &log->file); + if (log->file != NULL) { + log->file_pos = 0; + } else { + return; + } + } + + do { + tail = log->tail; + begin = log->head % log->buf_size; + end = tail % log->buf_size; + len = 0; + rc = 0; + + schedule_count_th++; + if ((schedule_count_th >= max_loop)) { + schedule_count_th = 0; + schedule_timeout_interruptible(SXE_KLOG_OUT_WAIT); + } + + if (log->is_drop) { + rc = sxe_file_write( + log->file, + DEBUG_DROP_LOG_STRING, + strlen(DEBUG_DROP_LOG_STRING)); + if (rc < 0) { + break; + } + log->is_drop = 0; + } + + if (begin < end) { + rc = sxe_file_write( + log->file, + log->buf + begin, + end - begin); + if (rc > 0) { + len += rc; + } + } else if(begin > end) { + rc = sxe_file_write( + log->file, + log->buf + begin, + log->buf_size - begin); + if (rc > 0) { + len += rc; + rc = sxe_file_write(log->file, log->buf, end); + if (rc > 0) { + len += rc; + } + } + } + log->head += len; + log->file_pos += len; + + LOG_BUG_ON(log->head > log->tail, "FAILURE: log head exceeds log tail\n"); + }while (log->head != log->tail && rc > 0); + + if (rc < 0) { + sxe_print(KERN_ERR, NULL, "write file %s error %d\n", log->file_path, rc); + return ; + } + +#ifdef SWITCH_FILE + if (log->file_pos >= log->file_size) { + rc = sxe_file_open(log, &file); + if (rc >= 0 && log->file != NULL && log->file_num != 1) { + sxe_file_close(&log->file); + log->file = file; + log->file_pos = 0; + } + } +#endif + return ; +} + +static int sxe_klog_flush(void *arg) +{ + int i; + + while (!kthread_should_stop()){ + schedule_timeout_interruptible(SXE_KLOG_OUT_WAIT); + + for (i = 0; i < ARRAY_SIZE(g_sxe_debug.log); i++){ + sxe_klog_out(&g_sxe_debug.log[i]); + } + } + return 0; +} + +static int sxe_klog_init( + sxe_log_t *log, + long long buf_size, + char *file_path, + long long file_size, + u32 file_num) +{ + int rc = 0; + + memset(log, 0, sizeof(*log)); + spin_lock_init(&log->lock); + + log->buf = (char*)vmalloc(buf_size+PER_CPU_PAGE_SIZE); + if (!log->buf){ + rc = -ENOMEM; + goto l_end; + } + + 
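+ /*
+  * head and tail are free-running byte counters into the ring buffer;
+  * sxe_klog_in()/sxe_klog_out() reduce them modulo buf_size when copying.
+  */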
log->file = NULL; + log->head = 0; + log->tail = 0; + log->buf_size = buf_size; + + log->file_path = file_path; + log->file_pos = 0; + log->file_size = file_size; + log->file_num = file_num; + log->index = 0; +l_end: + return rc; +} + +static void sxe_klog_exit(sxe_log_t *log) +{ + if (log->buf) { + vfree(log->buf); + } + if (log->file) { + sxe_file_close(&log->file); + } +} + +static inline char *sxe_file_name_locale(char *file) +{ + char *p_slash = strrchr(file, '/'); + return (p_slash == NULL)?file:(p_slash+1); +} + +void sxe_level_set(int level) +{ + g_sxe_debug.level = level; +} + +s32 sxe_level_get(void) +{ + return (s32)g_sxe_debug.level; +} + +void sxe_bin_status_set(bool status) +{ + g_sxe_debug.status = status; +} + +s32 sxe_bin_status_get(void) +{ + return (s32)g_sxe_debug.status; +} + +void sxe_log_string( + debug_level_e level, + const char *dev_name, + const char *file, + const char *func, + int line, + const char *fmt,...) +{ + sxe_ctxt_t *ctxt = NULL; + char *buf = NULL; + int len = 0; + ULong flags = 0; + const char *name = dev_name ? dev_name : ""; + + va_list args; + + if (level > g_sxe_debug.level){ +#ifndef SXE_CFG_RELEASE + if (!sxe_filter_file_print(file) + && !sxe_filter_func_print(func)){ + return; + } +#else + return; +#endif + } + + if (!in_interrupt()){ + local_irq_save(flags); + } + + ctxt = per_cpu_ptr(g_sxe_debug.ctxt, get_cpu()); + put_cpu(); + + buf = ctxt->buff; + + len = snprintf(buf, PAGE_SIZE, "%s", sxe_debug_level_name(level)); + len += time_for_log(buf+len, PAGE_SIZE - len); + len += snprintf(buf+len, PAGE_SIZE - len, "[%d][%d][%s]%s:%4d:%s:", + raw_smp_processor_id(), current->pid, + name, + sxe_file_name_locale((char*)file), line, func); + + va_start(args, fmt); + len += vsnprintf( + buf + len, + PAGE_SIZE - len, + fmt, + args); + va_end(args); + + + if (!in_interrupt()){ + local_irq_restore(flags); + } + + if (sxe_log_tty_query()) { + if (buf[0] == 'I' || buf[0] == 'W') { + printk_ratelimited(KERN_WARNING"%s", buf + LOG_INFO_PREFIX_LEN); + } else if (buf[0] == 'E') { + printk_ratelimited(KERN_WARNING"%s", buf + LOG_ERROR_PREFIX_LEN); + } + } + sxe_klog_in(&g_sxe_debug.log[DEBUG_TYPE_STRING], buf, len); + + wake_up_process(g_sxe_debug.task); + + return; +} + +void sxe_log_binary( + const char *file, + const char *func, + int line, + u8 *ptr, + u64 addr, + u32 size, + char *str) +{ +#define LINE_TOTAL 16 + sxe_ctxt_t *ctxt = NULL; + char *buf = NULL; + int len = 0; + ULong flags = 0; + u32 i = 0; + u32 j = 0; + u32 max; + u32 mod; + + if (sxe_bin_status_get() != true) { + return; + } + + max = size / LINE_TOTAL; + mod = size % LINE_TOTAL; + + if (!in_interrupt()){ + local_irq_save(flags); + } + + ctxt = per_cpu_ptr(g_sxe_debug.ctxt, get_cpu()); + put_cpu(); + + buf = ctxt->buff; + + len += time_for_log(buf+len, PER_CPU_PAGE_SIZE - len); + len += snprintf(buf+len, PER_CPU_PAGE_SIZE - len, + "[%d] %s %s():%d %s size:%d\n", + current->pid, sxe_file_name_locale((char*)file), func, + line, str, size); + + for (i = 0; i < max; i++) { + j = i * LINE_TOTAL; + + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + + for (; j < (i + 1) * LINE_TOTAL; j++) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + } + len += snprintf(buf + len, PER_CPU_PAGE_SIZE -len, "%c", '\n'); + } + + if (mod) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + + for (; j < size; j++) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", 
ptr[j], ','); + } + + len += snprintf(buf + len, PER_CPU_PAGE_SIZE -len, "%c", '\n'); + } + + if (!in_interrupt()) { + local_irq_restore(flags); + } + + sxe_klog_in(&g_sxe_debug.log[DEBUG_TYPE_BINARY], buf, len); + + wake_up_process(g_sxe_debug.task); + + return; +} + +void sxe_log_sync(void) +{ + sxe_file_sync(g_sxe_debug.log[DEBUG_TYPE_STRING].file); + sxe_file_sync(g_sxe_debug.log[DEBUG_TYPE_BINARY].file); +} + +static void sxe_log_file_prefix_add(bool is_vf, char *log_path_p) +{ + if (is_vf) { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s%s.", log_path_p, VF_LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s%s.", log_path_p, VF_BINARY_FILE_PREFIX); + } else { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s%s.", log_path_p, LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s%s.", log_path_p, BINARY_FILE_PREFIX); + } + + return; +} + +static void sxe_log_file_prefix_add_default(bool is_vf, char *log_path_p) +{ + if (is_vf) { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s/%s.", log_path_p, VF_LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s/%s.", log_path_p, VF_BINARY_FILE_PREFIX); + } else { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s/%s.", log_path_p, LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s/%s.", log_path_p, BINARY_FILE_PREFIX); + } + + return; +} + +static void sxe_log_file_path_set(bool is_vf) +{ + if (is_vf) { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s.", VF_LOG_FILE_PATH); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s.", VF_BINARY_FILE_PATH); + } else { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s.", LOG_FILE_PATH); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s.", BINARY_FILE_PATH); + } + + return; +} + +int sxe_log_init(bool is_vf) +{ + struct task_struct *task = NULL; + sxe_ctxt_t *ctxt = NULL; + int rc = 0; + int i; + int nid; + u32 file_num = 0; + u32 log_path_len = 0; + u32 input_log_space = sxe_log_space_size_query(); + u32 input_log_file_size = sxe_log_file_size_query(); + unsigned int log_file_size = 0; + char *log_path_p = NULL; + sxe_log_t *log_bin = &g_sxe_debug.log[DEBUG_TYPE_BINARY]; + sxe_log_t *log_str = &g_sxe_debug.log[DEBUG_TYPE_STRING]; + + INIT_LIST_HEAD(&g_sxe_debug.filter_file); + INIT_LIST_HEAD(&g_sxe_debug.filter_func); + +#ifdef SXE_CFG_RELEASE + g_sxe_debug.level = LEVEL_INFO; + g_sxe_debug.status = false; +#else + g_sxe_debug.level = LEVEL_DEBUG; + g_sxe_debug.status = true; +#endif + + g_sxe_debug.ctxt = alloc_percpu(sxe_ctxt_t); + if (!g_sxe_debug.ctxt) { + rc = -ENOMEM; + sxe_print(KERN_ERR, NULL, "alloc percpu failed\n"); + goto l_end; + } + + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe_debug.ctxt, i); + memset(ctxt, 0, sizeof(*ctxt)); + } + + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe_debug.ctxt, i); + nid = cpu_to_node(i); + + ctxt->page = alloc_pages_node(nid, GFP_ATOMIC, PAGE_ORDER); + if (!ctxt->page) { + rc = -ENOMEM; + sxe_print(KERN_ERR, NULL, "kmalloc size %lu failed\n", + PER_CPU_PAGE_SIZE); + goto l_free_cpu_buff; + } + ctxt->buff = page_address(ctxt->page); + } + + log_path_p = sxe_log_path_query(); + log_path_len = strlen(log_path_p); + if (log_path_p != NULL && log_path_p[0] == '/') { + if (log_path_p[log_path_len] == '/') { + sxe_log_file_prefix_add(is_vf, log_path_p); + } else { + sxe_log_file_prefix_add_default(is_vf, log_path_p); + } + } else { + sxe_log_file_path_set(is_vf); + } + if (input_log_file_size < DRV_LOG_FILE_SIZE_MIN_MB || + input_log_file_size > DRV_LOG_FILE_SIZE_MAX_MB) { + sxe_log_file_size_modify(LOG_FILE_SIZE >> MEGABYTE); + 
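+ /* keep the local size in sync with the clamped g_log_file_size (200MB default) */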
input_log_file_size = LOG_FILE_SIZE >> MEGABYTE; + } + if (input_log_space && input_log_space < input_log_file_size) { + sxe_log_file_size_modify(input_log_space); + input_log_file_size = input_log_space; + } + log_file_size = input_log_file_size << MEGABYTE; + + if (input_log_space) { + file_num = input_log_space / input_log_file_size; + if (file_num == 0) { + sxe_print(KERN_ERR, NULL, "filenum shouldnot be 0\n"); + SXE_BUG(); + } + } else { + file_num = 0; + } + + rc = sxe_klog_init( + log_str, + BUF_SIZE, + g_log_path_str, + log_file_size, + file_num); + if (rc < 0) { + goto l_free_cpu_buff; + } + + rc = sxe_klog_init( + log_bin, + BUF_SIZE, + g_log_path_bin, + BINARY_FILE_SIZE, + 0); + if (rc < 0) { + goto l_free_string; + } + + task = kthread_create(sxe_klog_flush, NULL, "sxe_klog_flush"); + if (IS_ERR(task)) { + rc = (int)PTR_ERR(task); + sxe_print(KERN_ERR, NULL, "Create kernel thread, err: %d\n", rc); + goto l_free_binary; + } + wake_up_process(task); + g_sxe_debug.task = task; + rc = 0; + sxe_print(KERN_INFO, NULL, "sxe debug init logpath[%s] strlogsize[%dM] filenum[%d]\n", + g_log_path_str, (log_file_size >> MEGABYTE), log_str->file_num); +l_end: + return rc; + +l_free_binary: + sxe_klog_exit(&g_sxe_debug.log[DEBUG_TYPE_BINARY]); + +l_free_string: + sxe_klog_exit(&g_sxe_debug.log[DEBUG_TYPE_STRING]); + +l_free_cpu_buff: + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe_debug.ctxt, i); + if (ctxt && ctxt->page) { + __free_page(ctxt->page); + } + } + free_percpu(g_sxe_debug.ctxt); + goto l_end; +} + +void sxe_log_exit(void) +{ + int i = 0; + sxe_ctxt_t *ctxt; + + if (g_sxe_debug.task == NULL) { + return; + } + + kthread_stop(g_sxe_debug.task); + + for (i = 0; i < ARRAY_SIZE(g_sxe_debug.log); i++) { + sxe_klog_exit(&g_sxe_debug.log[i]); + } + + if (g_sxe_debug.ctxt) { + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe_debug.ctxt, i); + if (ctxt && ctxt->page) { + __free_page(ctxt->page); + } + } + + free_percpu(g_sxe_debug.ctxt); + g_sxe_debug.ctxt = NULL; + } +} + +#elif !defined SXE_DRIVER_RELEASE + +s32 g_sxe_log_level = LEVEL_INFO; +s32 g_sxe_bin_status = false; +char *test_bin_buf = NULL; + +s32 sxe_log_init(bool is_vf) +{ + return 0; +} + +void sxe_level_set(s32 level) +{ + g_sxe_log_level = level; +} + +s32 sxe_level_get(void) +{ + return g_sxe_log_level; +} + +void sxe_bin_status_set(bool status) +{ + g_sxe_bin_status = status; +} + +s32 sxe_bin_status_get(void) +{ + return g_sxe_bin_status; +} + +void sxe_log_sync(void) +{ +} + +void sxe_log_exit(void) +{ + if (test_bin_buf != NULL) { + free(test_bin_buf); + } +} + +void sxe_log_binary( + const char *file, + const char *func, + int line, + u8 *ptr, + u64 addr, + u32 size, + char *str) +{ +#define LINE_TOTAL 16 + u32 i = 0; + u32 j = 0; + u32 iMax; + u32 mod; + char *buf = NULL; + int len = 0; + + if (sxe_bin_status_get() != true) { + return; + } + + buf = zalloc(PER_CPU_PAGE_SIZE); + test_bin_buf = buf; + + iMax = size / LINE_TOTAL; + mod = size % LINE_TOTAL; + + len += snprintf(buf+len, PER_CPU_PAGE_SIZE - len, + "%s size:%d\n", str, size); + + for (i = 0; i < iMax; i++) { + j = i * LINE_TOTAL; + + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + + for (; j < (i + 1) * LINE_TOTAL; j++) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + } + len += snprintf(buf + len, PER_CPU_PAGE_SIZE -len, "%c", '\n'); + } + + if (mod) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + + for (; 
j < size; j++) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + } + + len += snprintf(buf + len, PER_CPU_PAGE_SIZE -len, "%c", '\n'); + } + + printf("buf:%s", buf); + + return; +} + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe/base/log/sxe_log.h b/drivers/net/ethernet/linkdata/sxe/base/log/sxe_log.h new file mode 100644 index 000000000000..1712f9317937 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/log/sxe_log.h @@ -0,0 +1,425 @@ + +#ifndef _SXE_LOG_H_ +#define _SXE_LOG_H_ + +#include "sxe_log_types.h" + +#ifdef SXE_TEST +#define STATIC +#else +#define STATIC static +#endif + +#ifdef __cplusplus +extern "C"{ +#endif + +#define SXE_HOST(ins) (ins)->host->host_no + +#define LOG_INFO_PREFIX_LEN 32 +#define LOG_ERROR_PREFIX_LEN 33 +#define MEGABYTE 20 + +typedef enum { + LEVEL_ERROR, + LEVEL_WARN, + LEVEL_INFO, + LEVEL_DEBUG, +}debug_level_e; + +static inline const S8 *sxe_debug_level_name(debug_level_e lv) +{ + static const S8 *level[] = { + [LEVEL_ERROR] = "ERROR", + [LEVEL_WARN] = "WARN", + [LEVEL_INFO] = "INFO", + [LEVEL_DEBUG] = "DEBUG", + }; + + return level[lv]; +} + +#ifdef __KERNEL__ + +#define PRINT_DEBUG KERN_DEBUG +#define PRINT_INFO KERN_INFO +#define PRINT_WARN KERN_WARNING +#define PRINT_ERR KERN_ERR + +#define sxe_print(level,bdf, fmt,...) \ + printk(level"[SXE]%s():%d:" fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__) +#else + +#define PRINT_DEBUG LEVEL_DEBUG +#define PRINT_INFO LEVEL_INFO +#define PRINT_WARN LEVEL_WARN +#define PRINT_ERR LEVEL_ERROR + +#include +#include +#include +#include + +#define __percpu + +static inline U64 get_now_ms() { + struct timeval tv; + U64 timestamp = 0; + gettimeofday(&tv, NULL); + timestamp = tv.tv_sec * 1000 + tv.tv_usec/1000; + return timestamp; +} + +#define filename_printf(x) strrchr((x),'/')?strrchr((x),'/')+1:(x) + +#define sxe_print(level,bdf, fmt,...) do { \ + if (level <= sxe_level_get()) { \ + if (level == LEVEL_DEBUG) { \ + printf("DEBUG:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), filename_printf(__FILE__), \ + __FUNCTION__, __LINE__, pthread_self(), bdf ? bdf : "", ##__VA_ARGS__); \ + } else if (level == LEVEL_INFO) { \ + printf("INFO:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), filename_printf(__FILE__), \ + __FUNCTION__, __LINE__, pthread_self(), bdf ? bdf : "", ##__VA_ARGS__); \ + } else if (level == LEVEL_WARN) { \ + printf("WARN:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), filename_printf(__FILE__), \ + __FUNCTION__, __LINE__, pthread_self(), bdf ? bdf : "", ##__VA_ARGS__); \ + }else if (level == LEVEL_ERROR) { \ + printf("ERROR:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), filename_printf(__FILE__), \ + __FUNCTION__, __LINE__, pthread_self(), bdf ? bdf : "", ##__VA_ARGS__); \ + } \ + } \ +} while(0) + +#endif + +#define LOG_BUG_ON(cond, fmt, ...) 
do { \ + if((cond)) { \ + LOG_ERROR(fmt, ##__VA_ARGS__); \ + LOG_SYNC(); \ + BUG(); \ + } \ +}while(0) + +#define DEBUG_TRACE_MAGIC 0x456789 +#define BUF_SIZE (1024LL << 10) + +#define PAGE_ORDER 2 +#define PER_CPU_PAGE_SIZE (PAGE_SIZE * (1 << 2)) + +#define LOG_FILE_SIZE (200LL << 20) +#define BINARY_FILE_SIZE (200LL << 20) + +#define VF_LOG_FILE_PATH "/var/log/sxevf.log" +#define VF_LOG_FILE_PREFIX "sxevf.log" +#define VF_BINARY_FILE_PATH "/var/log/sxevf.bin" +#define VF_BINARY_FILE_PREFIX "sxevf.bin" + +#define LOG_FILE_PATH "/var/log/sxe.log" +#define LOG_FILE_PREFIX "sxe.log" +#define BINARY_FILE_PATH "/var/log/sxe.bin" +#define BINARY_FILE_PREFIX "sxe.bin" + +#define DEBUG_DROP_LOG_STRING "\nwarnning:drop some logs\n\n" + +enum { + DEBUG_TYPE_STRING, + DEBUG_TYPE_BINARY, + DEBUG_TYPE_NR, +}; + +typedef struct { + struct list_head list; + char name[64]; +} debug_func_t; + +typedef struct { + struct list_head list; + char name[64]; +} debug_file_t; + +typedef struct { + struct { + char *buf; + int buf_size; + long long head; + long long tail; + spinlock_t lock; + unsigned char is_drop; + }; + struct { + char *file_path; + struct file *file; + long long file_pos; + long long file_size; + U32 file_num; + U32 index; + }; +} sxe_log_t; + +typedef struct { + s32 magic; + char data[0]; +} sxe_thread_local_t; + +typedef struct { + struct page *page; + void *buff; +} sxe_ctxt_t; + +typedef struct { + s32 offset; +} sxe_thread_key_t; + +typedef struct { + debug_level_e level; + bool status; + u16 key_offset; + sxe_ctxt_t __percpu *ctxt; + struct list_head filter_func; + struct list_head filter_file; + struct task_struct *task; + sxe_log_t log[DEBUG_TYPE_NR]; +} sxe_debug_t; + +void sxe_level_set(int level); +s32 sxe_level_get(void); + +void sxe_bin_status_set(bool status); +s32 sxe_bin_status_get(void); + +int sxe_log_init(bool is_vf); +void sxe_log_exit(void); + +void sxe_log_string(debug_level_e level, const char *dev_name, const char *file, const char *func, + int line, const char *fmt, ...); + +void sxe_log_binary(const char *file, const char *func, int line, u8 *ptr, + u64 addr, u32 size, char *str); + +#define DATA_DUMP(ptr, size, str) \ + sxe_log_binary(__FILE__, __FUNCTION__, __LINE__, (u8*)ptr, 0, size, str) + +void sxe_log_sync(void); + +#ifdef SXE_DRIVER_TRACE +int time_for_file_name(char *buff, int buf_len); +int sxe_file_write(struct file *file, char *buf, int len); +#endif + +#if defined SXE_DRIVER_DEBUG && defined __KERNEL__ + +#define WRITE_LOG(level, bdf, fmt, ...) \ + sxe_log_string(level, bdf, __FILE__, __FUNCTION__, __LINE__, fmt, ##__VA_ARGS__) + +#define LOG_DEBUG(fmt, ...) WRITE_LOG(LEVEL_DEBUG, NULL, fmt, ##__VA_ARGS__) +#define LOG_INFO(fmt, ...) WRITE_LOG(LEVEL_INFO,NULL, fmt, ##__VA_ARGS__) +#define LOG_WARN(fmt, ...) WRITE_LOG(LEVEL_WARN, NULL, fmt, ##__VA_ARGS__) +#define LOG_ERROR(fmt, ...) WRITE_LOG(LEVEL_ERROR, NULL, fmt, ##__VA_ARGS__) + +#define LOG_DEBUG_BDF(fmt, ...) WRITE_LOG(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_INFO_BDF(fmt, ...) WRITE_LOG(LEVEL_INFO,adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_WARN_BDF(fmt, ...) WRITE_LOG(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_ERROR_BDF(fmt, ...) WRITE_LOG(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) + +#define LOG_SYNC() sxe_log_sync() + +#define LOG_DEV_DEBUG(format, arg...) \ + dev_dbg(&adapter->pdev->dev, format, ## arg); \ + LOG_DEBUG_BDF(format, ## arg) + +#define LOG_DEV_INFO(format, arg...) 
\ + dev_info(&adapter->pdev->dev, format, ## arg); \ + LOG_INFO_BDF(format, ## arg) + +#define LOG_DEV_WARN(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg); \ + LOG_WARN_BDF(format, ## arg) + +#define LOG_DEV_ERR(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg); \ + LOG_ERROR_BDF(format, ## arg) + +#define LOG_MSG_DEBUG(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_DEBUG_BDF(format, ## arg) + +#define LOG_MSG_INFO(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_INFO_BDF(format, ## arg) + +#define LOG_MSG_WARN(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_WARN_BDF(format, ## arg) + +#define LOG_MSG_ERR(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_ERROR_BDF(format, ## arg) + +#define LOG_PR_DEBUG(format, arg...) pr_debug("sxe: "format, ## arg); +#define LOG_PR_INFO(format, arg...) pr_info("sxe: "format, ## arg); +#define LOG_PR_WARN(format, arg...) pr_warn("sxe: "format, ## arg); +#define LOG_PR_ERR(format, arg...) pr_err("sxe: "format, ## arg); +#define LOG_PRVF_DEBUG(format, arg...) pr_debug("sxevf: "format, ## arg); +#define LOG_PRVF_INFO(format, arg...) pr_info("sxevf: "format, ## arg); +#define LOG_PRVF_WARN(format, arg...) pr_warn("sxevf: "format, ## arg); +#define LOG_PRVF_ERR(format, arg...) pr_err("sxevf: "format, ## arg); + +#else + +#if defined SXE_DRIVER_RELEASE + +#define LOG_DEBUG(fmt, ...) +#define LOG_INFO(fmt, ...) +#define LOG_WARN(fmt, ...) +#define LOG_ERROR(fmt, ...) + +#define UNUSED(x) (void)(x) + +#define LOG_DEBUG_BDF(fmt, ...) UNUSED(adapter) +#define LOG_INFO_BDF(fmt, ...) UNUSED(adapter) +#define LOG_WARN_BDF(fmt, ...) UNUSED(adapter) +#define LOG_ERROR_BDF(fmt, ...) UNUSED(adapter) + +#define LOG_DEV_DEBUG(format, arg...) \ + dev_dbg(&adapter->pdev->dev, format, ## arg); + +#define LOG_DEV_INFO(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg); + +#define LOG_DEV_WARN(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg); + +#define LOG_DEV_ERR(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg); + +#define LOG_MSG_DEBUG(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg); + +#define LOG_MSG_INFO(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg); + +#define LOG_MSG_WARN(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg); + +#define LOG_MSG_ERR(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg); + +#define LOG_PR_DEBUG(format, arg...) pr_debug("sxe: "format, ## arg); +#define LOG_PR_INFO(format, arg...) pr_info("sxe: "format, ## arg); +#define LOG_PR_WARN(format, arg...) pr_warn("sxe: "format, ## arg); +#define LOG_PR_ERR(format, arg...) pr_err("sxe: "format, ## arg); +#define LOG_PRVF_DEBUG(format, arg...) pr_debug("sxevf: "format, ## arg); +#define LOG_PRVF_INFO(format, arg...) pr_info("sxevf: "format, ## arg); +#define LOG_PRVF_WARN(format, arg...) pr_warn("sxevf: "format, ## arg); +#define LOG_PRVF_ERR(format, arg...) pr_err("sxevf: "format, ## arg); + +#else + +#define LOG_DEBUG(fmt, ...) sxe_print(PRINT_DEBUG, "", fmt, ##__VA_ARGS__) +#define LOG_INFO(fmt, ...) sxe_print(PRINT_INFO, "", fmt, ##__VA_ARGS__) +#define LOG_WARN(fmt, ...) sxe_print(PRINT_WARN, "", fmt, ##__VA_ARGS__) +#define LOG_ERROR(fmt, ...) 
sxe_print(PRINT_ERR, "", fmt, ##__VA_ARGS__) + +#define LOG_DEBUG_BDF(fmt, ...) sxe_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_INFO_BDF(fmt, ...) sxe_print(LEVEL_INFO,adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_WARN_BDF(fmt, ...) sxe_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_ERROR_BDF(fmt, ...) sxe_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) + +#define LOG_DEV_DEBUG(fmt, ...) \ + sxe_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_DEV_INFO(fmt, ...) \ + sxe_print(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_DEV_WARN(fmt, ...) \ + sxe_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_DEV_ERR(fmt, ...) \ + sxe_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) + +#define LOG_MSG_DEBUG(msglvl, fmt, ...) \ + sxe_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_MSG_INFO(msglvl, fmt, ...) \ + sxe_print(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_MSG_WARN(msglvl, fmt, ...) \ + sxe_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_MSG_ERR(msglvl, fmt, ...) \ + sxe_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) + +#define LOG_PR_DEBUG(fmt, ...) \ + sxe_print(PRINT_DEBUG, "sxe", fmt, ##__VA_ARGS__); + +#define LOG_PR_INFO(fmt, ...) \ + sxe_print(PRINT_INFO, "sxe", fmt, ##__VA_ARGS__); + +#define LOG_PR_WARN(fmt, ...) \ + sxe_print(PRINT_WARN, "sxe", fmt, ##__VA_ARGS__); + +#define LOG_PR_ERR(fmt, ...) \ + sxe_print(PRINT_ERR, "sxe", fmt, ##__VA_ARGS__); +#define LOG_PRVF_DEBUG(fmt, ...) \ + sxe_print(PRINT_DEBUG, "sxevf", fmt, ##__VA_ARGS__); + +#define LOG_PRVF_INFO(fmt, ...) \ + sxe_print(PRINT_INFO, "sxevf", fmt, ##__VA_ARGS__); + +#define LOG_PRVF_WARN(fmt, ...) \ + sxe_print(PRINT_WARN, "sxevf", fmt, ##__VA_ARGS__); + +#define LOG_PRVF_ERR(fmt, ...) 
\ + sxe_print(PRINT_ERR, "sxevf", fmt, ##__VA_ARGS__); + +#endif + +#define LOG_SYNC() + +#endif + +#if defined SXE_DRIVER_RELEASE +#define SXE_BUG_ON(cond) do { \ + if((cond)) { \ + printk(KERN_ERR "BUG_ON's condition(%s) has been triggered\n", #cond); \ + LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \ + } \ +}while(0) + +#define SXE_BUG() +#define SXE_BUG_ON_NO_SYNC(cond) do { \ + if((cond)) { \ + printk(KERN_ERR "BUG_ON's condition(%s) has been triggered\n", #cond); \ + LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \ + } \ +}while(0) + +#define SXE_BUG_NO_SYNC() +#else +#define SXE_BUG_ON(cond) do { \ + if((cond)) { \ + printk(KERN_ERR "BUG_ON's condition(%s) has been triggered\n", #cond); \ + LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \ + LOG_SYNC(); \ + } \ + BUG_ON(cond); \ +}while(0) + +#define SXE_BUG(void) do { \ + LOG_SYNC(); \ + BUG(void); \ +}while(0) + +#define SXE_BUG_ON_NO_SYNC(cond) do { \ + if((cond)) { \ + printk(KERN_ERR "BUG_ON's condition(%s) has been triggered\n", #cond); \ + LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \ + } \ + BUG_ON(cond); \ +}while(0) + +#define SXE_BUG_NO_SYNC(void) do { \ + BUG(void); \ +}while(0) + +#endif + +#ifdef __cplusplus +} +#endif +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe/base/log/sxe_log_types.h b/drivers/net/ethernet/linkdata/sxe/base/log/sxe_log_types.h new file mode 100644 index 000000000000..d302e7160f55 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/log/sxe_log_types.h @@ -0,0 +1,94 @@ + +#ifndef __SXE_LOG_TYPES_H__ +#define __SXE_LOG_TYPES_H__ + +#ifdef __cplusplus +extern "C"{ +#endif + +#include + +typedef unsigned char U8; +typedef unsigned short U16; +typedef unsigned int U32; +typedef unsigned long ULong; +typedef unsigned long long U64; + +typedef char S8; +typedef short S16; +typedef int S32; +typedef long Long; +typedef long long S64; + +#define SXE_FALSE 0 +#define SXE_TRUE 1 + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef likely +#define likely(x) __builtin_expect(!!(x), 1) +#endif + +#ifndef unlikely +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +#ifndef offsetof +#define offsetof(TYPE, MEMBER) ((size_t)(&((TYPE *)0)->MEMBER)) +#endif + + +#ifndef SXE_MIN +#define SXE_MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif + + +#ifndef SXE_MAX +#define SXE_MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif + + +#ifndef SXE_MIN_NON_ZERO +#define SXE_MIN_NON_ZERO(a, b) ((a) == 0 ? (b) : \ + ((b) == 0 ? 
(a) : (SXE_MIN(a, b)))) +#endif + +#ifndef TYPEOF +#ifdef __cplusplus +#define TYPEOF decltype +#else +#define TYPEOF typeof +#endif +#endif + +#ifndef container_of +#ifndef PCLINT +#define container_of(ptr, type, member) ({ \ + const TYPEOF( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) +#else +#define container_of(ptr, type, member) \ + ((type *)(void *)(char *)ptr) +#endif +#endif + +#ifndef SXE_DESC +#define SXE_DESC(a) 1 +#endif + + +#ifndef SXE_IN +#define SXE_IN +#endif + +#ifndef SXE_OUT +#define SXE_OUT +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/base/trace/sxe_trace.c b/drivers/net/ethernet/linkdata/sxe/base/trace/sxe_trace.c new file mode 100644 index 000000000000..af20cb66462b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/base/trace/sxe_trace.c @@ -0,0 +1,226 @@ + +#ifdef SXE_DRIVER_TRACE + +#include +#include + +#include "sxe_trace.h" +#include "sxe_ring.h" +#include "sxe_log.h" + +#define SXE_FILE_NAME_LEN (256) +#define SXE_TRACE_NS_MASK (0xFFFFFFFF) + +#define SXE_TRACE_BUF_CLEAN(buf, buf_size, len) \ + memset(buf, 0, buf_size); \ + len = 0; + +struct sxe_trace_tx_ring g_sxe_trace_tx[SXE_TXRX_RING_NUM_MAX] = {{ 0 }}; +struct sxe_trace_rx_ring g_sxe_trace_rx[SXE_TXRX_RING_NUM_MAX] = {{ 0 }}; + +void sxe_file_close(struct file **file) +{ + filp_close(*file, NULL); + *file = NULL; +} + +void sxe_trace_tx_add(u8 ring_idx, enum sxe_trace_lab_tx lab) +{ + if (unlikely(ring_idx >= SXE_TXRX_RING_NUM_MAX) || unlikely(lab >= SXE_TRACE_LAB_TX_MAX)) { + return; + } + + if (unlikely(lab == 0)) { + g_sxe_trace_tx[ring_idx].next++; + g_sxe_trace_tx[ring_idx].next &= SXE_TRACE_PER_RING_MASK; + memset(&g_sxe_trace_tx[ring_idx].timestamp[g_sxe_trace_tx[ring_idx].next], 0, + sizeof(g_sxe_trace_tx[ring_idx].timestamp[0])); + } + + g_sxe_trace_tx[ring_idx].timestamp[g_sxe_trace_tx[ring_idx].next][lab] = ktime_get_real_ns() & SXE_TRACE_NS_MASK; +} + +void sxe_trace_rx_add(u8 ring_idx, enum sxe_trace_lab_rx lab) +{ + if (unlikely(ring_idx >= SXE_TXRX_RING_NUM_MAX) || unlikely(lab >= SXE_TRACE_LAB_RX_MAX)) { + return; + } + + if (unlikely(lab == 0)) { + g_sxe_trace_rx[ring_idx].next++; + g_sxe_trace_rx[ring_idx].next &= SXE_TRACE_PER_RING_MASK; + memset(&g_sxe_trace_rx[ring_idx].timestamp[g_sxe_trace_rx[ring_idx].next], 0, + sizeof(g_sxe_trace_rx[ring_idx].timestamp[0])); + } + + g_sxe_trace_rx[ring_idx].timestamp[g_sxe_trace_rx[ring_idx].next][lab] = ktime_get_real_ns() & SXE_TRACE_NS_MASK; +} + +static int sxe_trace_create_file(struct file **pp_file) +{ + char file_name[SXE_FILE_NAME_LEN] = {}; + int flags_new = O_CREAT | O_RDWR | O_APPEND | O_LARGEFILE; + int len = 0; + int rc = 0; + struct file *file; + + len += snprintf(file_name, sizeof(file_name), "%s.", SXE_TRACE_DUMP_FILE_NAME); + time_for_file_name(file_name + len, sizeof(file_name) - len); + + file = filp_open(file_name, flags_new, 0666); + if (IS_ERR(file)) { + rc = (int)PTR_ERR(file); + sxe_print(KERN_ERR, NULL, "open file:%s failed[errno:%d]\n", file_name, rc); + goto l_out; + } + *pp_file = file; + +l_out: + return rc; +} + +static int sxe_trace_write_file(struct file *file) +{ + char * buff; + size_t buff_size = 2048; + int rc = 0; + int len = 0; + u64 spend = 0; + u64 times = 0; + u64 spend_total = 0; + u64 times_total = 0; + u64 start; + u64 end; + u32 i; + u32 j; + u32 k; + + buff = kzalloc(buff_size, GFP_KERNEL); + if (buff == NULL) { + rc = -ENOMEM; + sxe_print(KERN_ERR, NULL, "kzalloc %lu failed.\n", buff_size); + 
goto l_out; + } + + len += snprintf(buff + len, buff_size - len, "tx trace dump:\n"); + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + for (i = 0; i < ARRAY_SIZE(g_sxe_trace_tx); i++) { + spend = 0; + times = 0; + for (j = 0; j < SXE_TRACE_NUM_PER_RING; j++) { + start = g_sxe_trace_tx[i].timestamp[j][SXE_TRACE_LAB_TX_START]; + end = g_sxe_trace_tx[i].timestamp[j][SXE_TRACE_LAB_TX_END]; + if (start == 0 || end == 0) { + continue; + } + SXE_TRACE_BUF_CLEAN(buff, buff_size, len); + len += snprintf(buff + len, buff_size - len, "\ttx ring %d trace %d dump:", i, j); + for (k = 0; k < SXE_TRACE_LAB_TX_MAX; k++) { + len += snprintf(buff + len, buff_size - len, "%llu ", g_sxe_trace_tx[i].timestamp[j][k]); + } + len += snprintf(buff + len, buff_size - len, "spend: %llu\n", end - start); + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + spend += end - start; + times++; + } + + SXE_TRACE_BUF_CLEAN(buff, buff_size, len); + len += snprintf(buff + len, buff_size - len, "tx ring %d, spend %llu, times:%llu.\n", i, spend, times); + spend_total += spend; + times_total += times; + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + } + + SXE_TRACE_BUF_CLEAN(buff, buff_size, len); + len += snprintf(buff + len, buff_size - len, "tx trace dump, spend_total: %llu, times_total: %llu.\n", + spend_total, times_total); + + len += snprintf(buff + len, buff_size - len, "rx trace dump:\n"); + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + spend_total = 0; + times_total = 0; + for (i = 0; i < ARRAY_SIZE(g_sxe_trace_rx); i++) { + spend = 0; + times = 0; + for (j = 0; j < SXE_TRACE_NUM_PER_RING; j++) { + start = g_sxe_trace_rx[i].timestamp[j][SXE_TRACE_LAB_RX_START]; + end = g_sxe_trace_rx[i].timestamp[j][SXE_TRACE_LAB_RX_END]; + if (start == 0 || end == 0) { + continue; + } + SXE_TRACE_BUF_CLEAN(buff, buff_size, len); + len += snprintf(buff + len, buff_size - len, "\trx ring %d trace %d dump:", i, j); + for (k = 0; k < SXE_TRACE_LAB_RX_MAX; k++) { + len += snprintf(buff + len, buff_size - len, "%llu ", g_sxe_trace_rx[i].timestamp[j][k]); + } + len += snprintf(buff + len, buff_size - len, "spend: %llu\n", end - start); + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + spend += end - start; + times++; + } + SXE_TRACE_BUF_CLEAN(buff, buff_size, len); + len += snprintf(buff + len, buff_size - len, "rx ring %d, spend %llu, times:%llu:\n", i, spend, times); + spend_total += spend; + times_total += times; + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + } + + SXE_TRACE_BUF_CLEAN(buff, buff_size, len); + len += snprintf(buff + len, buff_size - len, "rx trace dump, spend_total: %llu, times_total: %llu.\n", + spend_total, times_total); + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + +l_out: + if (buff) { + kfree(buff); + } + if (rc < 0) { + sxe_print(KERN_ERR, NULL, "write file error %d\n", rc); + } + return rc; +} + +void sxe_trace_dump(void) +{ + struct file *file; + int rc = 0; + + rc = sxe_trace_create_file(&file); + if (file == NULL) { + goto l_out; + } + + rc = sxe_trace_write_file(file); + if (rc < 0) { + goto l_out; + } + +l_out: + if (file) { + sxe_file_close(&file); + } + return; +} + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/base/trace/sxe_trace.h b/drivers/net/ethernet/linkdata/sxe/base/trace/sxe_trace.h new file mode 100644 index 000000000000..fdeb450b5028 --- /dev/null +++ 
b/drivers/net/ethernet/linkdata/sxe/base/trace/sxe_trace.h @@ -0,0 +1,77 @@ + +#ifndef __SXE_TRACE_H__ +#define __SXE_TRACE_H__ + +#ifdef SXE_DRIVER_TRACE + +#define SXE_TRACE_NUM_PER_RING (2048) +#define SXE_TRACE_PER_RING_MASK (0x7FF) + +#ifndef SXE_TEST +#define SXE_TRACE_DUMP_FILE_NAME ("/var/log/sxe_trace_dump.log") +#else +#define SXE_TRACE_DUMP_FILE_NAME (".sxe_trace_dump.log") +#endif + +enum sxe_trace_lab_tx { + SXE_TRACE_LAB_TX_START = 0, + SXE_TRACE_LAB_TX_MAY_STOP, + SXE_TRACE_LAB_TX_VLAN, + SXE_TRACE_LAB_TX_DCB, + SXE_TRACE_LAB_TX_IPSEC, + SXE_TRACE_LAB_TX_TSO, + SXE_TRACE_LAB_TX_DESC, + SXE_TRACE_LAB_TX_PPT, + SXE_TRACE_LAB_TX_FDIR, + SXE_TRACE_LAB_TX_OL_INFO, + SXE_TRACE_LAB_TX_MAP, + SXE_TRACE_LAB_TX_SENT, + SXE_TRACE_LAB_TX_UPDATE, + SXE_TRACE_LAB_TX_MAY_STOP_2, + SXE_TRACE_LAB_TX_WRITE, + SXE_TRACE_LAB_TX_END, + SXE_TRACE_LAB_TX_MAX, +}; + +struct sxe_trace_tx_ring { + u64 next; + u64 timestamp[SXE_TRACE_NUM_PER_RING][SXE_TRACE_LAB_TX_MAX]; +}; + +enum sxe_trace_lab_rx { + SXE_TRACE_LAB_RX_START = 0, + SXE_TRACE_LAB_RX_CLEAN, + SXE_TRACE_LAB_RX_UNMAP, + SXE_TRACE_LAB_RX_STATS, + SXE_TRACE_LAB_RX_HANG, + SXE_TRACE_LAB_RX_DONE, + SXE_TRACE_LAB_RX_WAKE, + SXE_TRACE_LAB_RX_END, + SXE_TRACE_LAB_RX_MAX, +}; + +struct sxe_trace_rx_ring { + u64 next; + u64 timestamp[SXE_TRACE_NUM_PER_RING][SXE_TRACE_LAB_RX_MAX]; +}; + +void sxe_trace_tx_add(u8 ring_idx, enum sxe_trace_lab_tx lab); + +void sxe_trace_rx_add(u8 ring_idx, enum sxe_trace_lab_rx lab); + +void sxe_trace_dump(void); + +#define SXE_TRACE_TX(r_idx, lab) \ + sxe_trace_tx_add(r_idx, lab) + +#define SXE_TRACE_RX(r_idx, lab) \ + sxe_trace_rx_add(r_idx, lab) + +#else +#define SXE_TRACE_TX(r_idx, lab) + +#define SXE_TRACE_RX(r_idx, lab) + +#endif +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe/include/drv_msg.h b/drivers/net/ethernet/linkdata/sxe/include/drv_msg.h new file mode 100644 index 000000000000..027c88ebfc23 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/include/drv_msg.h @@ -0,0 +1,19 @@ + +#ifndef __DRV_MSG_H__ +#define __DRV_MSG_H__ + +#ifdef SXE_HOST_DRIVER +#include "sxe_drv_type.h" +#endif + +#define SXE_VERSION_LEN 32 + + + + + +typedef struct sxe_version_resp { + U8 fw_version[SXE_VERSION_LEN]; +}sxe_version_resp_s; + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/include/sxe/mgl/sxe_port.h b/drivers/net/ethernet/linkdata/sxe/include/sxe/mgl/sxe_port.h new file mode 100644 index 000000000000..d2dfff6a5848 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/include/sxe/mgl/sxe_port.h @@ -0,0 +1,37 @@ +#ifndef __SXE_PORT_H__ +#define __SXE_PORT_H__ + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "mgc_types.h" +#include "ps3_types.h" + +typedef enum MglPortCmdSetCode{ + MGL_CMD_PORT_SET_BASE = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 0), + MGL_CMD_PORT_SET_REG = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 1), + MGL_CMD_PORT_SET_LED = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 2), + MGL_CMD_SXE_SOC_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 3), + MGL_CMD_SXE_SFP_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 4), + MGL_CMD_SXE_SOC_RST = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 5), + MGL_CMD_SXE_SET_MFGINFO = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 6), + MGL_CMD_SXE_SET_INSIGHT = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 7), + MGL_CMD_SXE_OPT_INSIGHT = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 8), +} MglPortCmdSetCode_e; + +typedef enum 
MglPortCmdGetCode{ + MGL_CMD_SXE_GET_REG = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 0), + MGL_CMD_SXE_GET_SOC_INFO = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 1), + MGL_CMD_SXE_LOG_EXPORT = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 2), + MGL_CMD_SXE_REGS_DUMP = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 3), + MGL_CMD_SXE_GET_MFGINFO = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 4), + MGL_CMD_SXE_MAC_ADDR_GET = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 5), + MGL_CMD_SXE_GET_INSIGHT = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 6), +} MglPortCmdGetCode_e; + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_cli.h b/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_cli.h new file mode 100644 index 000000000000..f11b3343afe8 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_cli.h @@ -0,0 +1,210 @@ + +#ifndef __SXE_CLI_H__ +#define __SXE_CLI_H__ + +#ifdef SXE_HOST_DRIVER +#include "sxe_drv_type.h" +#endif + +#define SXE_VERION_LEN (32) +#define SXE_MAC_NUM (128) +#define SXE_PORT_TRANSCEIVER_LEN (32) +#define SXE_PORT_VENDOR_LEN (32) +#define SXE_CHIP_TYPE_LEN (32) +#define SXE_VPD_SN_LEN (16) +#define SXE_SOC_RST_TIME (0x93A80) +#define SXE_SFP_TEMP_THRESHOLD_INTERVAL (3) +#define MGC_TERMLOG_INFO_MAX_LEN (12 * 1024) +#define SXE_REGS_DUMP_MAX_LEN (12 * 1024) +#define SXE_PRODUCT_NAME_LEN (32) + +typedef enum sxe_led_mode { + SXE_IDENTIFY_LED_BLINK_ON = 0, + SXE_IDENTIFY_LED_BLINK_OFF, + SXE_IDENTIFY_LED_ON, + SXE_IDENTIFY_LED_OFF, + SXE_IDENTIFY_LED_RESET, +} sxe_led_mode_s; + +typedef struct sxe_led_ctrl { + U32 mode; + U32 duration; + +} sxe_led_ctrl_s; + +typedef struct sxe_led_ctrl_resp { + U32 ack; +} sxe_led_ctrl_resp_s; + +typedef enum PortLinkSpeed { + PORT_LINK_NO = 0, + PORT_LINK_100M = 1, + PORT_LINK_1G = 2, + PORT_LINK_10G = 3, +} PortLinkSpeed_e; + +typedef struct SysSocInfo { + S8 fwVer[SXE_VERION_LEN]; + S8 optVer[SXE_VERION_LEN]; + U8 socStatus; + U8 pad[3]; + S32 socTemp; + U64 chipId; + S8 chipType[SXE_CHIP_TYPE_LEN]; + S8 pba[SXE_VPD_SN_LEN]; + S8 productName[SXE_PRODUCT_NAME_LEN]; +} SysSocInfo_s; + +typedef struct SysPortInfo { + U64 mac[SXE_MAC_NUM]; + U8 isPortAbs; + U8 linkStat; + U8 linkSpeed; + + + U8 isSfp:1; + U8 isGetInfo:1; + U8 rvd:6; + S8 opticalModTemp; + U8 pad[3]; + S8 transceiverType[SXE_PORT_TRANSCEIVER_LEN]; + S8 vendorName[SXE_PORT_VENDOR_LEN]; + S8 vendorPn[SXE_PORT_VENDOR_LEN]; +} SysPortInfo_s; + +typedef struct SysInfoResp { + SysSocInfo_s socInfo; + SysPortInfo_s portInfo; +} SysInfoResp_s; + +typedef enum SfpTempTdMode { + SFP_TEMP_THRESHOLD_MODE_ALARM = 0, + SFP_TEMP_THRESHOLD_MODE_WARN, +} SfpTempTdMode_e; + +typedef struct SfpTempTdSet{ + U8 mode; + U8 pad[3]; + S8 hthreshold; + S8 lthreshold; +} SfpTempTdSet_s; + +typedef struct SxeLogExportResp { + U16 curLogLen; + U8 isEnd; + U8 pad; + S32 sessionId; + S8 data[0]; +} SxeLogExportResp_s; + +typedef enum SxeLogExportType { + SXE_LOG_EXPORT_REQ = 0, + SXE_LOG_EXPORT_FIN, + SXE_LOG_EXPORT_ABORT, +} SxeLogExportType_e; + +typedef struct SxeLogExportReq { + U8 isALLlog; + U8 cmdtype; + U8 isBegin; + U8 pad; + S32 sessionId; + U32 logLen; +} SxeLogExportReq_s; + +typedef struct SocRstReq { + U32 time; +} SocRstReq_s; + +typedef struct RegsDumpResp { + U32 curdwLen; + U8 data[0]; +} RegsDumpResp_s; + +enum { + SXE_MFG_PART_NUMBER_LEN = 8, + SXE_MFG_SERIAL_NUMBER_LEN = 16, + SXE_MFG_REVISION_LEN = 4, + SXE_MFG_OEM_STR_LEN = 64, + 
SXE_MFG_SXE_BOARD_ASSEMBLY_LEN = 32, + SXE_MFG_SXE_BOARD_TRACE_NUM_LEN = 16, + SXE_MFG_SXE_MAC_ADDR_CNT = 2, +}; + +typedef struct sxeMfgInfo { + U8 partNumber[SXE_MFG_PART_NUMBER_LEN]; + U8 serialNumber [SXE_MFG_SERIAL_NUMBER_LEN]; + U32 mfgDate; + U8 revision[SXE_MFG_REVISION_LEN]; + U32 reworkDate; + U8 pad[4]; + U64 macAddr[SXE_MFG_SXE_MAC_ADDR_CNT]; + U8 boardTraceNum[SXE_MFG_SXE_BOARD_TRACE_NUM_LEN]; + U8 boardAssembly[SXE_MFG_SXE_BOARD_ASSEMBLY_LEN]; + U8 extra1[SXE_MFG_OEM_STR_LEN]; + U8 extra2[SXE_MFG_OEM_STR_LEN]; +} sxeMfgInfo_t; + +typedef struct RegsDumpReq { + U32 baseAddr; + U32 dwLen; +} RegsDumpReq_s; + +typedef enum sxe_pcs_mode { + SXE_PCS_MODE_1000BASE_KX_WO = 0, + SXE_PCS_MODE_1000BASE_KX_W, + SXE_PCS_MODE_SGMII, + SXE_PCS_MODE_10GBASE_KR_WO, + SXE_PCS_MODE_AUTO_NEGT_73, + SXE_PCS_MODE_LPBK_PHY_TX2RX, + SXE_PCS_MODE_LPBK_PHY_RX2TX, + SXE_PCS_MODE_LPBK_PCS_RX2TX, + SXE_PCS_MODE_BUTT, +} sxe_pcs_mode_e; + +typedef enum sxe_remote_fault_mode { + SXE_REMOTE_FALUT_NO_ERROR = 0, + SXE_REMOTE_FALUT_OFFLINE, + SXE_REMOTE_FALUT_LINK_FAILURE, + SXE_REMOTE_FALUT_AUTO_NEGOTIATION, + SXE_REMOTE_UNKNOWN, +} sxe_remote_fault_e; + +typedef struct sxe_phy_cfg { + sxe_pcs_mode_e mode; + U32 mtu; +} sxe_pcs_cfg_s; + +typedef enum sxe_an_speed { + SXE_AN_SPEED_NO_LINK = 0, + SXE_AN_SPEED_100M, + SXE_AN_SPEED_1G, + SXE_AN_SPEED_10G, + SXE_AN_SPEED_UNKNOWN, +} sxe_an_speed_e; + +typedef enum sxe_phy_pause_cap { + SXE_PAUSE_CAP_NO_PAUSE = 0, + SXE_PAUSE_CAP_ASYMMETRIC_PAUSE, + SXE_PAUSE_CAP_SYMMETRIC_PAUSE, + SXE_PAUSE_CAP_BOTH_PAUSE, + SXE_PAUSE_CAP_UNKNOWN, +} sxe_phy_pause_cap_e; + +typedef enum sxe_phy_duplex_type { + SXE_FULL_DUPLEX = 0, + SXE_HALF_DUPLEX = 1, + SXE_UNKNOWN_DUPLEX, +} sxe_phy_duplex_type_e; + +typedef struct sxe_phy_an_cap { + sxe_remote_fault_e remote_fault; + sxe_phy_pause_cap_e pause_cap; + sxe_phy_duplex_type_e duplex_cap; +} sxe_phy_an_cap_s; + +typedef struct sxe_an_cap { + sxe_phy_an_cap_s local; + sxe_phy_an_cap_s peer; +} sxe_an_cap_s; +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_hdc.h b/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_hdc.h new file mode 100644 index 000000000000..0df074aa01c0 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_hdc.h @@ -0,0 +1,40 @@ + +#ifndef __SXE_HDC_H__ +#define __SXE_HDC_H__ + +#ifdef SXE_HOST_DRIVER +#include "sxe_drv_type.h" +#endif + +#define HDC_CACHE_TOTAL_LEN (16 *1024) +#define ONE_PACKET_LEN_MAX (1024) +#define DWORD_NUM (256) +#define HDC_TRANS_RETRY_COUNT (3) + + +typedef enum SxeHdcErrnoCode { + PKG_OK = 0, + PKG_ERR_REQ_LEN, + PKG_ERR_RESP_LEN, + PKG_ERR_PKG_SKIP, + PKG_ERR_NODATA, + PKG_ERR_PF_LK, + PKG_ERR_OTHER, +} SxeHdcErrnoCode_e; + +typedef union HdcHeader { + struct { + U8 pid:4; + U8 errCode:4; + U8 len; + U16 startPkg:1; + U16 endPkg:1; + U16 isRd:1; + U16 msi:1; + U16 totalLen:12; + } head; + U32 dw0; +} HdcHeader_u; + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_ioctl.h b/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_ioctl.h new file mode 100644 index 000000000000..88e59b2cc658 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_ioctl.h @@ -0,0 +1,18 @@ +#ifndef _SXE_IOCTL_H_ +#define _SXE_IOCTL_H_ + +#ifdef SXE_HOST_DRIVER +#include "sxe_drv_type.h" +#endif + +struct SxeIoctlSyncCmd { + U64 traceid; + void *inData; + U32 inLen; + void *outData; + U32 outLen; +}; + +#define SXE_CMD_IOCTL_SYNC_CMD _IOWR('M', 1, struct SxeIoctlSyncCmd) + +#endif diff --git 
a/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_msg.h b/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_msg.h new file mode 100644 index 000000000000..113a9136c27a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_msg.h @@ -0,0 +1,136 @@ + +#ifndef __SXE_MSG_H__ +#define __SXE_MSG_H__ + +#ifdef SXE_HOST_DRIVER +#include "sxe_drv_type.h" +#endif + +#define SXE_MAC_ADDR_LEN 6 + +#define SXE_HDC_CMD_HDR_SIZE sizeof(struct sxe_hdc_cmd_hdr) +#define SXE_HDC_MSG_HDR_SIZE sizeof(struct sxe_hdc_drv_cmd_msg) + +enum sxe_cmd_type { + SXE_CMD_TYPE_CLI, + SXE_CMD_TYPE_DRV, + SXE_CMD_TYPE_UNKOWN, +}; + +typedef struct sxe_hdc_cmd_hdr { + U8 cmd_type; + U8 cmd_sub_type; + U8 reserve[6]; +}sxe_hdc_cmd_hdr_s; + + + +typedef enum SxeFWState { + SXE_FW_START_STATE_UNDEFINED = 0x00, + SXE_FW_START_STATE_INIT_BASE = 0x10, + SXE_FW_START_STATE_SCAN_DEVICE = 0x20, + SXE_FW_START_STATE_FINISHED = 0x30, + SXE_FW_START_STATE_UPGRADE = 0x31, + SXE_FW_RUNNING_STATE_ABNOMAL = 0x40, + SXE_FW_START_STATE_MASK = 0xF0, +}SxeFWState_e; + +typedef struct SxeFWStateInfo { + U8 socStatus; + char statBuff[32]; +} SxeFWStateInfo_s; + + +typedef enum MsiEvt { + MSI_EVT_SOC_STATUS = 0x1, + MSI_EVT_HDC_FWOV = 0x2, + MSI_EVT_HDC_TIME_SYNC = 0x4, + + MSI_EVT_MAX = 0x80000000, +} MsiEvt_u; + + +typedef enum SxeFwHdcState { + SXE_FW_HDC_TRANSACTION_IDLE = 0x01, + SXE_FW_HDC_TRANSACTION_BUSY, + + SXE_FW_HDC_TRANSACTION_ERR, +} SxeFwHdcState_e; + +enum sxe_hdc_cmd_opcode { + SXE_CMD_SET_WOL = 1, + SXE_CMD_LED_CTRL, + SXE_CMD_SFP_READ, + SXE_CMD_SFP_WRITE, + SXE_CMD_TX_DIS_CTRL = 5, + SXE_CMD_TINE_SYNC, + SXE_CMD_RATE_SELECT, + SXE_CMD_R0_MAC_GET, + SXE_CMD_LOG_EXPORT, + SXE_CMD_FW_VER_GET = 10, + SXE_CMD_PCS_SDS_INIT, + SXE_CMD_AN_SPEED_GET, + SXE_CMD_AN_CAP_GET, + SXE_CMD_GET_SOC_INFO, + SXE_CMD_MNG_RST = 15, + + SXE_CMD_MAX, +}; + +enum sxe_hdc_cmd_errcode { + SXE_ERR_INVALID_PARAM = 1, +}; + +typedef struct sxe_hdc_drv_cmd_msg { + + U16 opcode; + U16 errcode; + union dataLength { + U16 req_len; + U16 ack_len; + } length; + U8 reserve[8]; + U64 traceid; + U8 body[0]; +} sxe_hdc_drv_cmd_msg_s; + + +typedef struct sxe_sfp_rw_req { + U16 offset; + U16 len; + U8 write_data[0]; +} sxe_sfp_rw_req_s; + + +typedef struct sxe_sfp_read_resp { + U16 len; + U8 resp[0]; +} sxe_sfp_read_resp_s; + +typedef enum sxe_sfp_rate{ + SXE_SFP_RATE_1G = 0, + SXE_SFP_RATE_10G = 1, +} sxe_sfp_rate_e; + + +typedef struct sxe_sfp_rate_able { + sxe_sfp_rate_e rate; +} sxe_sfp_rate_able_s; + + +typedef struct sxe_spp_tx_able { + BOOL isDisable; +} sxe_spp_tx_able_s; + + +typedef struct sxe_default_mac_addr_resp { + U8 addr[SXE_MAC_ADDR_LEN]; +} sxe_default_mac_addr_resp_s; + + +typedef struct sxe_mng_rst { + BOOL enable; +} sxe_mng_rst_s; + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_regs.h b/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_regs.h new file mode 100644 index 000000000000..aa68a0047f7e --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/include/sxe/sxe_regs.h @@ -0,0 +1,1273 @@ + +#ifndef __SXE_REGS_H__ +#define __SXE_REGS_H__ + +#define SXE_LINKSEC_MAX_SC_COUNT 1 +#define SXE_LINKSEC_MAX_SA_COUNT 2 + +#define SXE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 + + +#define SXE_REG_READ_FAIL 0xffffffffU +#define SXE_REG_READ_RETRY 5 +#ifdef SXE_TEST +#define SXE_PCI_MASTER_DISABLE_TIMEOUT (1) +#else +#define SXE_PCI_MASTER_DISABLE_TIMEOUT (800) +#endif + + +#define SXE_CTRL 0x00000 +#define SXE_STATUS 0x00008 +#define SXE_CTRL_EXT 0x00018 + + +#define SXE_CTRL_LNK_RST 0x00000008 +#define SXE_CTRL_RST 
0x04000000 + +#ifdef SXE_TEST +#define SXE_CTRL_RST_MASK (0) +#define SXE_CTRL_GIO_DIS (0) +#else +#define SXE_CTRL_RST_MASK (SXE_CTRL_LNK_RST | SXE_CTRL_RST) +#define SXE_CTRL_GIO_DIS 0x00000004 +#endif + + +#define SXE_STATUS_GIO 0x00080000 + + +#define SXE_CTRL_EXT_PFRSTD 0x00004000 +#define SXE_CTRL_EXT_NS_DIS 0x00010000 +#define SXE_CTRL_EXT_DRV_LOAD 0x10000000 + + +#define SXE_FCRTL(_i) (0x03220 + ((_i) * 4)) +#define SXE_FCRTH(_i) (0x03260 + ((_i) * 4)) +#define SXE_FCCFG 0x03D00 + + +#define SXE_FCRTL_XONE 0x80000000 +#define SXE_FCRTH_FCEN 0x80000000 + +#define SXE_FCCFG_TFCE_802_3X 0x00000008 +#define SXE_FCCFG_TFCE_PRIORITY 0x00000010 + + +#define SXE_GCR_EXT 0x11050 + + +#define SXE_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define SXE_GCR_CMPL_TMOUT_10ms 0x00001000 +#define SXE_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define SXE_GCR_CAP_VER2 0x00040000 +#define SXE_GCR_EXT_MSIX_EN 0x80000000 +#define SXE_GCR_EXT_BUFFERS_CLEAR 0x40000000 +#define SXE_GCR_EXT_VT_MODE_16 0x00000001 +#define SXE_GCR_EXT_VT_MODE_32 0x00000002 +#define SXE_GCR_EXT_VT_MODE_64 0x00000003 +#define SXE_GCR_EXT_VT_MODE_MASK 0x00000003 +#define SXE_GCR_EXT_SRIOV (SXE_GCR_EXT_MSIX_EN | \ + SXE_GCR_EXT_VT_MODE_64) + +#define SXE_PCI_DEVICE_STATUS 0x7A +#define SXE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define SXE_PCI_LINK_STATUS 0x82 +#define SXE_PCI_DEVICE_CONTROL2 0x98 +#define SXE_PCI_LINK_WIDTH 0x3F0 +#define SXE_PCI_LINK_WIDTH_1 0x10 +#define SXE_PCI_LINK_WIDTH_2 0x20 +#define SXE_PCI_LINK_WIDTH_4 0x40 +#define SXE_PCI_LINK_WIDTH_8 0x80 +#define SXE_PCI_LINK_SPEED 0xF +#define SXE_PCI_LINK_SPEED_2500 0x1 +#define SXE_PCI_LINK_SPEED_5000 0x2 +#define SXE_PCI_LINK_SPEED_8000 0x3 +#define SXE_PCI_HEADER_TYPE_REGISTER 0x0E +#define SXE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define SXE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define SXE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define SXE_PCIDEVCTRL2_16_32ms_def 0x0 +#define SXE_PCIDEVCTRL2_50_100us 0x1 +#define SXE_PCIDEVCTRL2_1_2ms 0x2 +#define SXE_PCIDEVCTRL2_16_32ms 0x5 +#define SXE_PCIDEVCTRL2_65_130ms 0x6 +#define SXE_PCIDEVCTRL2_260_520ms 0x9 +#define SXE_PCIDEVCTRL2_1_2s 0xa +#define SXE_PCIDEVCTRL2_4_8s 0xd +#define SXE_PCIDEVCTRL2_17_34s 0xe + + +#define SXE_EICR 0x00800 +#define SXE_EICS 0x00808 +#define SXE_EIMS 0x00880 +#define SXE_EIMC 0x00888 +#define SXE_EIAC 0x00810 +#define SXE_EIAM 0x00890 +#define SXE_EITRSEL 0x00894 +#define SXE_GPIE 0x00898 +#define SXE_IVAR(i) (0x00900 + (i) * 4) +#define SXE_IVAR_MISC 0x00A00 +#define SXE_EICS_EX(i) (0x00A90 + (i) * 4) +#define SXE_EIMS_EX(i) (0x00AA0 + (i) * 4) +#define SXE_EIMC_EX(i) (0x00AB0 + (i) * 4) +#define SXE_EIAM_EX(i) (0x00AD0 + (i) * 4) +#define SXE_EITR(i) (((i) <= 23) ? 
(0x00820 + ((i) * 4)) : \ + (0x012300 + (((i) - 24) * 4))) + +#define SXE_SPP_PROC 0x00AD8 +#define SXE_SPP_STATE 0x00AF4 + + + +#define SXE_EICR_RTX_QUEUE 0x0000FFFF +#define SXE_EICR_FLOW_NAV 0x00010000 +#define SXE_EICR_MAILBOX 0x00080000 +#define SXE_EICR_LSC 0x00100000 +#define SXE_EICR_LINKSEC 0x00200000 +#define SXE_EICR_ECC 0x10000000 +#define SXE_EICR_HDC 0x20000000 +#define SXE_EICR_TCP_TIMER 0x40000000 +#define SXE_EICR_OTHER 0x80000000 + + +#define SXE_EICS_RTX_QUEUE SXE_EICR_RTX_QUEUE +#define SXE_EICS_FLOW_NAV SXE_EICR_FLOW_NAV +#define SXE_EICS_MAILBOX SXE_EICR_MAILBOX +#define SXE_EICS_LSC SXE_EICR_LSC +#define SXE_EICS_ECC SXE_EICR_ECC +#define SXE_EICS_HDC SXE_EICR_HDC +#define SXE_EICS_TCP_TIMER SXE_EICR_TCP_TIMER +#define SXE_EICS_OTHER SXE_EICR_OTHER + + +#define SXE_EIMS_RTX_QUEUE SXE_EICR_RTX_QUEUE +#define SXE_EIMS_FLOW_NAV SXE_EICR_FLOW_NAV +#define SXE_EIMS_MAILBOX SXE_EICR_MAILBOX +#define SXE_EIMS_LSC SXE_EICR_LSC +#define SXE_EIMS_ECC SXE_EICR_ECC +#define SXE_EIMS_HDC SXE_EICR_HDC +#define SXE_EIMS_TCP_TIMER SXE_EICR_TCP_TIMER +#define SXE_EIMS_OTHER SXE_EICR_OTHER +#define SXE_EIMS_ENABLE_MASK (SXE_EIMS_RTX_QUEUE | SXE_EIMS_LSC | \ + SXE_EIMS_TCP_TIMER | SXE_EIMS_OTHER) + +#define SXE_EIMC_FLOW_NAV SXE_EICR_FLOW_NAV +#define SXE_EIMC_LSC SXE_EICR_LSC +#define SXE_EIMC_HDC SXE_EICR_HDC + + +#define SXE_GPIE_SPP0_EN 0x00000001 +#define SXE_GPIE_SPP1_EN 0x00000002 +#define SXE_GPIE_SPP2_EN 0x00000004 +#define SXE_GPIE_MSIX_MODE 0x00000010 +#define SXE_GPIE_OCD 0x00000020 +#define SXE_GPIE_EIMEN 0x00000040 +#define SXE_GPIE_EIAME 0x40000000 +#define SXE_GPIE_PBA_SUPPORT 0x80000000 +#define SXE_GPIE_VTMODE_MASK 0x0000C000 +#define SXE_GPIE_VTMODE_16 0x00004000 +#define SXE_GPIE_VTMODE_32 0x00008000 +#define SXE_GPIE_VTMODE_64 0x0000C000 + + +#define SXE_IVAR_ALLOC_VALID 0x80 + + +#define SXE_EITR_CNT_WDIS 0x80000000 +#define SXE_EITR_ITR_MASK 0x00000FF8 +#define SXE_EITR_ITR_SHIFT 2 +#define SXE_EITR_ITR_MAX (SXE_EITR_ITR_MASK >> SXE_EITR_ITR_SHIFT) + + +#define SXE_EICR_GPI_SPP0 0x01000000 +#define SXE_EICR_GPI_SPP1 0x02000000 +#define SXE_EICR_GPI_SPP2 0x04000000 +#define SXE_EIMS_GPI_SPP0 SXE_EICR_GPI_SPP0 +#define SXE_EIMS_GPI_SPP1 SXE_EICR_GPI_SPP1 +#define SXE_EIMS_GPI_SPP2 SXE_EICR_GPI_SPP2 + + +#define SXE_SPP_PROC_SPP2_TRIGGER 0x00300000 +#define SXE_SPP_PROC_SPP2_TRIGGER_MASK 0xFFCFFFFF +#define SXE_SPP_PROC_DELAY_US_MASK 0x0000FFFF +#define SXE_SPP_PROC_DELAY_US 0x00000007 + + +#define SXE_IRQ_CLEAR_MASK 0xFFFFFFFF + + +#define SXE_RXCSUM 0x05000 +#define SXE_RFCTL 0x05008 +#define SXE_FCTRL 0x05080 +#define SXE_EXVET 0x05078 +#define SXE_VLNCTRL 0x05088 +#define SXE_MCSTCTRL 0x05090 +#define SXE_ETQF(_i) (0x05128 + ((_i) * 4)) +#define SXE_ETQS(_i) (0x0EC00 + ((_i) * 4)) +#define SXE_SYNQF 0x0EC30 +#define SXE_MTA(_i) (0x05200 + ((_i) * 4)) +#define SXE_UTA(_i) (0x0F400 + ((_i) * 4)) +#define SXE_VFTA(_i) (0x0A000 + ((_i) * 4)) +#define SXE_RAL(_i) (0x0A200 + ((_i) * 8)) +#define SXE_RAH(_i) (0x0A204 + ((_i) * 8)) +#define SXE_MPSAR_LOW(_i) (0x0A600 + ((_i) * 8)) +#define SXE_MPSAR_HIGH(_i) (0x0A604 + ((_i) * 8)) +#define SXE_PSRTYPE(_i) (0x0EA00 + ((_i) * 4)) +#define SXE_RETA(_i) (0x0EB00 + ((_i) * 4)) +#define SXE_RSSRK(_i) (0x0EB80 + ((_i) * 4)) +#define SXE_RQTC 0x0EC70 +#define SXE_MRQC 0x0EC80 +#define SXE_IEOI 0x0F654 +#define SXE_PL 0x0F658 +#define SXE_LPL 0x0F65C + + +#define SXE_ETQF_CNT 8 +#define SXE_MTA_CNT 128 +#define SXE_UTA_CNT 128 +#define SXE_VFTA_CNT 128 +#define SXE_RAR_CNT 128 +#define SXE_MPSAR_CNT 128 + + +#define 
SXE_EXVET_DEFAULT 0x81000000 +#define SXE_VLNCTRL_DEFAULT 0x8100 +#define SXE_IEOI_DEFAULT 0x060005DC +#define SXE_PL_DEFAULT 0x3e000016 +#define SXE_LPL_DEFAULT 0x26000000 + + +#define SXE_RXCSUM_IPPCSE 0x00001000 +#define SXE_RXCSUM_PCSD 0x00002000 + + +#define SXE_RFCTL_LRO_DIS 0x00000020 +#define SXE_RFCTL_NFSW_DIS 0x00000040 +#define SXE_RFCTL_NFSR_DIS 0x00000080 + + +#define SXE_FCTRL_SBP 0x00000002 +#define SXE_FCTRL_MPE 0x00000100 +#define SXE_FCTRL_UPE 0x00000200 +#define SXE_FCTRL_BAM 0x00000400 +#define SXE_FCTRL_PMCF 0x00001000 +#define SXE_FCTRL_DPF 0x00002000 + + +#define SXE_VLNCTRL_VET 0x0000FFFF +#define SXE_VLNCTRL_CFI 0x10000000 +#define SXE_VLNCTRL_CFIEN 0x20000000 +#define SXE_VLNCTRL_VFE 0x40000000 +#define SXE_VLNCTRL_VME 0x80000000 + +#define SXE_EXVET_VET_EXT_SHIFT 16 +#define SXE_EXTENDED_VLAN (1 << 26) + + +#define SXE_MCSTCTRL_MFE 4 + +#define SXE_ETQF_FILTER_EAPOL 0 +#define SXE_ETQF_FILTER_1588 3 +#define SXE_ETQF_FILTER_FIP 4 +#define SXE_ETQF_FILTER_LLDP 5 +#define SXE_ETQF_FILTER_LACP 6 +#define SXE_ETQF_FILTER_FC 7 +#define SXE_MAX_ETQF_FILTERS 8 +#define SXE_ETQF_1588 0x40000000 +#define SXE_ETQF_FILTER_EN 0x80000000 +#define SXE_ETQF_POOL_ENABLE BIT(26) +#define SXE_ETQF_POOL_SHIFT 20 + + +#define SXE_ETQS_RX_QUEUE 0x007F0000 +#define SXE_ETQS_RX_QUEUE_SHIFT 16 +#define SXE_ETQS_LLI 0x20000000 +#define SXE_ETQS_QUEUE_EN 0x80000000 + + +#define SXE_SYN_FILTER_ENABLE 0x00000001 +#define SXE_SYN_FILTER_QUEUE 0x000000FE +#define SXE_SYN_FILTER_QUEUE_SHIFT 1 +#define SXE_SYN_FILTER_SYNQFP 0x80000000 + + +#define SXE_RAH_VIND_MASK 0x003C0000 +#define SXE_RAH_VIND_SHIFT 18 +#define SXE_RAH_AV 0x80000000 +#define SXE_CLEAR_VMDQ_ALL 0xFFFFFFFF + + +#define SXE_PSRTYPE_TCPHDR 0x00000010 +#define SXE_PSRTYPE_UDPHDR 0x00000020 +#define SXE_PSRTYPE_IPV4HDR 0x00000100 +#define SXE_PSRTYPE_IPV6HDR 0x00000200 +#define SXE_PSRTYPE_L2HDR 0x00001000 + + +#define SXE_MRQC_RSSEN 0x00000001 +#define SXE_MRQC_MRQE_MASK 0xF +#define SXE_MRQC_RT8TCEN 0x00000002 +#define SXE_MRQC_RT4TCEN 0x00000003 +#define SXE_MRQC_RTRSS8TCEN 0x00000004 +#define SXE_MRQC_RTRSS4TCEN 0x00000005 +#define SXE_MRQC_VMDQEN 0x00000008 +#define SXE_MRQC_VMDQRSS32EN 0x0000000A +#define SXE_MRQC_VMDQRSS64EN 0x0000000B +#define SXE_MRQC_VMDQRT8TCEN 0x0000000C +#define SXE_MRQC_VMDQRT4TCEN 0x0000000D +#define SXE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define SXE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define SXE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define SXE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +#define SXE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define SXE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define SXE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define SXE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define SXE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define SXE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 + + +#define SXE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ + (0x0D000 + (((_i) - 64) * 0x40))) +#define SXE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ + (0x0D004 + (((_i) - 64) * 0x40))) +#define SXE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ + (0x0D008 + (((_i) - 64) * 0x40))) +#define SXE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ + (0x0D010 + (((_i) - 64) * 0x40))) +#define SXE_SRRCTL(_i) (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ + (0x0D014 + (((_i) - 64) * 0x40))) +#define SXE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ + (0x0D018 + (((_i) - 64) * 0x40))) +#define SXE_RXDCTL(_i) (((_i) < 64) ? 
(0x01028 + ((_i) * 0x40)) : \ + (0x0D028 + (((_i) - 64) * 0x40))) +#define SXE_LROCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ + (0x0D02C + (((_i) - 64) * 0x40))) +#define SXE_RDRXCTL 0x02F00 +#define SXE_RXCTRL 0x03000 +#define SXE_LRODBU 0x03028 +#define SXE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) + +#define SXE_DRXCFG (0x03C20) + + +#define SXE_RXDCTL_CNT 128 + + +#define SXE_RXDCTL_DEFAULT 0x40210 + + +#define SXE_SRRCTL_DROP_EN 0x10000000 +#define SXE_SRRCTL_BSIZEPKT_SHIFT (10) +#define SXE_SRRCTL_BSIZEHDRSIZE_SHIFT (2) +#define SXE_SRRCTL_DESCTYPE_DATA_ONEBUF 0x02000000 +#define SXE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define SXE_SRRCTL_BSIZEHDR_MASK 0x00003F00 + + +#define SXE_RXDCTL_ENABLE 0x02000000 +#define SXE_RXDCTL_SWFLSH 0x04000000 +#define SXE_RXDCTL_VME 0x40000000 +#define SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT 8 +#define SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT 16 + + +#define SXE_LROCTL_LROEN 0x01 +#define SXE_LROCTL_MAXDESC_1 0x00 +#define SXE_LROCTL_MAXDESC_4 0x04 +#define SXE_LROCTL_MAXDESC_8 0x08 +#define SXE_LROCTL_MAXDESC_16 0x0C + + +#define SXE_RDRXCTL_RDMTS_1_2 0x00000000 +#define SXE_RDRXCTL_RDMTS_EN 0x00200000 +#define SXE_RDRXCTL_CRCSTRIP 0x00000002 +#define SXE_RDRXCTL_PSP 0x00000004 +#define SXE_RDRXCTL_MVMEN 0x00000020 +#define SXE_RDRXCTL_DMAIDONE 0x00000008 +#define SXE_RDRXCTL_AGGDIS 0x00010000 +#define SXE_RDRXCTL_LROFRSTSIZE 0x003E0000 +#define SXE_RDRXCTL_LROLLIDIS 0x00800000 +#define SXE_RDRXCTL_LROACKC 0x02000000 +#define SXE_RDRXCTL_FCOE_WRFIX 0x04000000 +#define SXE_RDRXCTL_MBINTEN 0x10000000 +#define SXE_RDRXCTL_MDP_EN 0x20000000 +#define SXE_RDRXCTL_MPBEN 0x00000010 + +#define SXE_RDRXCTL_MCEN 0x00000040 + + + +#define SXE_RXCTRL_RXEN 0x00000001 + + +#define SXE_LRODBU_LROACKDIS 0x00000080 + + +#define SXE_DRXCFG_GSP_ZERO 0x00000002 +#define SXE_DRXCFG_DBURX_START 0x00000001 + + +#define SXE_DMATXCTL 0x04A80 +#define SXE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) +#define SXE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define SXE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define SXE_TDH(_i) (0x06010 + ((_i) * 0x40)) +#define SXE_TDT(_i) (0x06018 + ((_i) * 0x40)) +#define SXE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define SXE_PVFTDWBAL(p) (0x06038 + (0x40 * (p))) +#define SXE_PVFTDWBAH(p) (0x0603C + (0x40 * (p))) +#define SXE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) +#define SXE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) +#define SXE_MTQC 0x08120 +#define SXE_TXPBFCS 0x0CE00 +#define SXE_DTXCFG 0x0CE08 +#define SXE_DTMPCNT 0x0CE98 + + +#define SXE_DMATXCTL_DEFAULT 0x81000000 + + +#define SXE_DMATXCTL_TE 0x1 +#define SXE_DMATXCTL_GDV 0x8 +#define SXE_DMATXCTL_VT_SHIFT 16 +#define SXE_DMATXCTL_VT_MASK 0xFFFF0000 + + +#define SXE_TXDCTL_HTHRESH_SHIFT 8 +#define SXE_TXDCTL_WTHRESH_SHIFT 16 +#define SXE_TXDCTL_ENABLE 0x02000000 +#define SXE_TXDCTL_SWFLSH 0x04000000 + +#define SXE_PVFTDWBAL_N(ring_per_pool, vf_idx, vf_ring_idx) \ + SXE_PVFTDWBAL((ring_per_pool) * (vf_idx) + vf_ring_idx) +#define SXE_PVFTDWBAH_N(ring_per_pool, vf_idx, vf_ring_idx) \ + SXE_PVFTDWBAH((ring_per_pool) * (vf_idx) + vf_ring_idx) + + +#define SXE_MTQC_RT_ENA 0x1 +#define SXE_MTQC_VT_ENA 0x2 +#define SXE_MTQC_64Q_1PB 0x0 +#define SXE_MTQC_32VF 0x8 +#define SXE_MTQC_64VF 0x4 +#define SXE_MTQC_8TC_8TQ 0xC +#define SXE_MTQC_4TC_4TQ 0x8 + + +#define SXE_TFCS_PB0_MASK 0x1 +#define SXE_TFCS_PB1_MASK 0x2 +#define SXE_TFCS_PB2_MASK 0x4 +#define SXE_TFCS_PB3_MASK 0x8 +#define SXE_TFCS_PB4_MASK 0x10 +#define SXE_TFCS_PB5_MASK 0x20 +#define SXE_TFCS_PB6_MASK 0x40 +#define SXE_TFCS_PB7_MASK 0x80 +#define SXE_TFCS_PB_MASK 0xff + + 
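Editor's aside: the transmit-queue registers above are per-ring macros with a 0x40 stride. The sketch below shows how they might be exercised, assuming a hypothetical ioremap()ed BAR pointer `reg_base` and plain readl()/writel() accessors; the driver's own sxe_hw register wrappers are defined elsewhere in this patch and are not reproduced here, so this is illustrative only and not part of the patch.

#include <linux/io.h>
#include <linux/types.h>

/*
 * Illustrative sketch only: enable TX ring `idx` and publish `tail`
 * descriptors to hardware using the SXE_TXDCTL()/SXE_TDT() macros
 * defined above. `reg_base` is a hypothetical BAR0 mapping; the
 * in-tree driver routes register access through its own sxe_hw ops.
 */
static void sxe_example_tx_ring_kick(void __iomem *reg_base, u32 idx, u32 tail)
{
	u32 txdctl = readl(reg_base + SXE_TXDCTL(idx));

	txdctl |= SXE_TXDCTL_ENABLE;	/* 0x02000000: ring enable bit */
	writel(txdctl, reg_base + SXE_TXDCTL(idx));

	/* Tail write tells hardware how many descriptors are ready. */
	writel(tail, reg_base + SXE_TDT(idx));
}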
+#define SXE_DTXCFG_DBUTX_START 0x00000001 +#define SXE_DTXCFG_DBUTX_BUF_ALFUL_CFG 0x20 + + +#define SXE_RTRPCS 0x02430 +#define SXE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) +#define SXE_RTRUP2TC 0x03020 +#define SXE_RTTDCS 0x04900 +#define SXE_RTTDQSEL 0x04904 +#define SXE_RTTDT1C 0x04908 +#define SXE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) +#define SXE_RTTBCNRM 0x04980 +#define SXE_RTTBCNRC 0x04984 +#define SXE_RTTUP2TC 0x0C800 +#define SXE_RTTPCS 0x0CD00 +#define SXE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) + + +#define SXE_RTRPCS_RRM 0x00000002 +#define SXE_RTRPCS_RAC 0x00000004 +#define SXE_RTRPCS_ARBDIS 0x00000040 + + +#define SXE_RTRPT4C_MCL_SHIFT 12 +#define SXE_RTRPT4C_BWG_SHIFT 9 +#define SXE_RTRPT4C_GSP 0x40000000 +#define SXE_RTRPT4C_LSP 0x80000000 + + +#define SXE_RTRUP2TC_UP_SHIFT 3 +#define SXE_RTRUP2TC_UP_MASK 7 + + +#define SXE_RTTDCS_ARBDIS 0x00000040 +#define SXE_RTTDCS_TDPAC 0x00000001 + +#define SXE_RTTDCS_VMPAC 0x00000002 + +#define SXE_RTTDCS_TDRM 0x00000010 +#define SXE_RTTDCS_ARBDIS 0x00000040 +#define SXE_RTTDCS_BDPM 0x00400000 +#define SXE_RTTDCS_BPBFSM 0x00800000 + +#define SXE_RTTDCS_SPEED_CHG 0x80000000 + + +#define SXE_RTTDT2C_MCL_SHIFT 12 +#define SXE_RTTDT2C_BWG_SHIFT 9 +#define SXE_RTTDT2C_GSP 0x40000000 +#define SXE_RTTDT2C_LSP 0x80000000 + + +#define SXE_RTTBCNRC_RS_ENA 0x80000000 +#define SXE_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define SXE_RTTBCNRC_RF_INT_SHIFT 14 +#define SXE_RTTBCNRC_RF_INT_MASK \ + (SXE_RTTBCNRC_RF_DEC_MASK << SXE_RTTBCNRC_RF_INT_SHIFT) + + +#define SXE_RTTUP2TC_UP_SHIFT 3 + + +#define SXE_RTTPCS_TPPAC 0x00000020 + +#define SXE_RTTPCS_ARBDIS 0x00000040 +#define SXE_RTTPCS_TPRM 0x00000100 +#define SXE_RTTPCS_ARBD_SHIFT 22 +#define SXE_RTTPCS_ARBD_DCB 0x4 + + +#define SXE_RTTPT2C_MCL_SHIFT 12 +#define SXE_RTTPT2C_BWG_SHIFT 9 +#define SXE_RTTPT2C_GSP 0x40000000 +#define SXE_RTTPT2C_LSP 0x80000000 + + +#define SXE_TPH_CTRL 0x11074 +#define SXE_TPH_TXCTRL(_i) (0x0600C + ((_i) * 0x40)) +#define SXE_TPH_RXCTRL(_i) (((_i) < 64) ? 
(0x0100C + ((_i) * 0x40)) : \ + (0x0D00C + (((_i) - 64) * 0x40))) + + +#define SXE_TPH_CTRL_ENABLE 0x00000000 +#define SXE_TPH_CTRL_DISABLE 0x00000001 +#define SXE_TPH_CTRL_MODE_CB1 0x00 +#define SXE_TPH_CTRL_MODE_CB2 0x02 + + +#define SXE_TPH_RXCTRL_DESC_TPH_EN BIT(5) +#define SXE_TPH_RXCTRL_HEAD_TPH_EN BIT(6) +#define SXE_TPH_RXCTRL_DATA_TPH_EN BIT(7) +#define SXE_TPH_RXCTRL_DESC_RRO_EN BIT(9) +#define SXE_TPH_RXCTRL_DATA_WRO_EN BIT(13) +#define SXE_TPH_RXCTRL_HEAD_WRO_EN BIT(15) +#define SXE_TPH_RXCTRL_CPUID_SHIFT 24 + +#define SXE_TPH_TXCTRL_DESC_TPH_EN BIT(5) +#define SXE_TPH_TXCTRL_DESC_RRO_EN BIT(9) +#define SXE_TPH_TXCTRL_DESC_WRO_EN BIT(11) +#define SXE_TPH_TXCTRL_DATA_RRO_EN BIT(13) +#define SXE_TPH_TXCTRL_CPUID_SHIFT 24 + + +#define SXE_SECTXCTRL 0x08800 +#define SXE_SECTXSTAT 0x08804 +#define SXE_SECTXBUFFAF 0x08808 +#define SXE_SECTXMINIFG 0x08810 +#define SXE_SECRXCTRL 0x08D00 +#define SXE_SECRXSTAT 0x08D04 +#define SXE_LSECTXCTRL 0x08A04 +#define SXE_LSECTXSCL 0x08A08 +#define SXE_LSECTXSCH 0x08A0C +#define SXE_LSECTXSA 0x08A10 +#define SXE_LSECTXPN(_n) (0x08A14 + (4 * (_n))) +#define SXE_LSECTXKEY(_n, _m) (0x08A1C + ((0x10 * (_n)) + (4 * (_m)))) +#define SXE_LSECRXCTRL 0x08B04 +#define SXE_LSECRXSCL 0x08B08 +#define SXE_LSECRXSCH 0x08B0C +#define SXE_LSECRXSA(_i) (0x08B10 + (4 * (_i))) +#define SXE_LSECRXPN(_i) (0x08B18 + (4 * (_i))) +#define SXE_LSECRXKEY(_n, _m) (0x08B20 + ((0x10 * (_n)) + (4 * (_m)))) + + +#define SXE_SECTXCTRL_SECTX_DIS 0x00000001 +#define SXE_SECTXCTRL_TX_DIS 0x00000002 +#define SXE_SECTXCTRL_STORE_FORWARD 0x00000004 + + +#define SXE_SECTXSTAT_SECTX_RDY 0x00000001 +#define SXE_SECTXSTAT_SECTX_OFF_DIS 0x00000002 +#define SXE_SECTXSTAT_ECC_TXERR 0x00000004 + + +#define SXE_SECRXCTRL_SECRX_DIS 0x00000001 +#define SXE_SECRXCTRL_RX_DIS 0x00000002 +#define SXE_SECRXCTRL_RP 0x00000080 + + +#define SXE_SECRXSTAT_SECRX_RDY 0x00000001 +#define SXE_SECRXSTAT_SECRX_OFF_DIS 0x00000002 +#define SXE_SECRXSTAT_ECC_RXERR 0x00000004 + +#define SXE_SECTX_DCB_ENABLE_MASK 0x00001F00 + +#define SXE_LSECTXCTRL_EN_MASK 0x00000003 +#define SXE_LSECTXCTRL_EN_SHIFT 0 +#define SXE_LSECTXCTRL_ES 0x00000010 +#define SXE_LSECTXCTRL_AISCI 0x00000020 +#define SXE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define SXE_LSECTXCTRL_PNTHRSH_SHIFT 8 +#define SXE_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define SXE_LSECRXCTRL_EN_MASK 0x0000000C +#define SXE_LSECRXCTRL_EN_SHIFT 2 +#define SXE_LSECRXCTRL_DROP_EN 0x00000010 +#define SXE_LSECRXCTRL_DROP_EN_SHIFT 4 +#define SXE_LSECRXCTRL_PLSH 0x00000040 +#define SXE_LSECRXCTRL_PLSH_SHIFT 6 +#define SXE_LSECRXCTRL_RP 0x00000080 +#define SXE_LSECRXCTRL_RP_SHIFT 7 +#define SXE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +#define SXE_LSECTXSA_AN0_MASK 0x00000003 +#define SXE_LSECTXSA_AN0_SHIFT 0 +#define SXE_LSECTXSA_AN1_MASK 0x0000000C +#define SXE_LSECTXSA_AN1_SHIFT 2 +#define SXE_LSECTXSA_SELSA 0x00000010 +#define SXE_LSECTXSA_SELSA_SHIFT 4 +#define SXE_LSECTXSA_ACTSA 0x00000020 + +#define SXE_LSECRXSA_AN_MASK 0x00000003 +#define SXE_LSECRXSA_AN_SHIFT 0 +#define SXE_LSECRXSA_SAV 0x00000004 +#define SXE_LSECRXSA_SAV_SHIFT 2 +#define SXE_LSECRXSA_RETIRED 0x00000010 +#define SXE_LSECRXSA_RETIRED_SHIFT 4 + +#define SXE_LSECRXSCH_PI_MASK 0xFFFF0000 +#define SXE_LSECRXSCH_PI_SHIFT 16 + +#define SXE_LSECTXCTRL_DISABLE 0x0 +#define SXE_LSECTXCTRL_AUTH 0x1 +#define SXE_LSECTXCTRL_AUTH_ENCRYPT 0x2 + +#define SXE_LSECRXCTRL_DISABLE 0x0 +#define SXE_LSECRXCTRL_CHECK 0x1 +#define SXE_LSECRXCTRL_STRICT 0x2 +#define SXE_LSECRXCTRL_DROP 0x3 +#define SXE_SECTXCTRL_STORE_FORWARD_ENABLE 
0x4 + + + +#define SXE_IPSTXIDX 0x08900 +#define SXE_IPSTXSALT 0x08904 +#define SXE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) +#define SXE_IPSRXIDX 0x08E00 +#define SXE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) +#define SXE_IPSRXSPI 0x08E14 +#define SXE_IPSRXIPIDX 0x08E18 +#define SXE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) +#define SXE_IPSRXSALT 0x08E2C +#define SXE_IPSRXMOD 0x08E30 + + + +#define SXE_FNAVCTRL 0x0EE00 +#define SXE_FNAVHKEY 0x0EE68 +#define SXE_FNAVSKEY 0x0EE6C +#define SXE_FNAVDIP4M 0x0EE3C +#define SXE_FNAVSIP4M 0x0EE40 +#define SXE_FNAVTCPM 0x0EE44 +#define SXE_FNAVUDPM 0x0EE48 +#define SXE_FNAVIP6M 0x0EE74 +#define SXE_FNAVM 0x0EE70 + +#define SXE_FNAVFREE 0x0EE38 +#define SXE_FNAVLEN 0x0EE4C +#define SXE_FNAVUSTAT 0x0EE50 +#define SXE_FNAVFSTAT 0x0EE54 +#define SXE_FNAVMATCH 0x0EE58 +#define SXE_FNAVMISS 0x0EE5C + +#define SXE_FNAVSIPv6(_i) (0x0EE0C + ((_i) * 4)) +#define SXE_FNAVIPSA 0x0EE18 +#define SXE_FNAVIPDA 0x0EE1C +#define SXE_FNAVPORT 0x0EE20 +#define SXE_FNAVVLAN 0x0EE24 +#define SXE_FNAVHASH 0x0EE28 +#define SXE_FNAVCMD 0x0EE2C + + +#define SXE_FNAVCTRL_FLEX_SHIFT 16 +#define SXE_FNAVCTRL_MAX_LENGTH_SHIFT 24 +#define SXE_FNAVCTRL_FULL_THRESH_SHIFT 28 +#define SXE_FNAVCTRL_DROP_Q_SHIFT 8 +#define SXE_FNAVCTRL_PBALLOC_64K 0x00000001 +#define SXE_FNAVCTRL_PBALLOC_128K 0x00000002 +#define SXE_FNAVCTRL_PBALLOC_256K 0x00000003 +#define SXE_FNAVCTRL_INIT_DONE 0x00000008 +#define SXE_FNAVCTRL_SPECIFIC_MATCH 0x00000010 +#define SXE_FNAVCTRL_REPORT_STATUS 0x00000020 +#define SXE_FNAVCTRL_REPORT_STATUS_ALWAYS 0x00000080 + +#define SXE_FNAVCTRL_FLEX_MASK (0x1F << SXE_FNAVCTRL_FLEX_SHIFT) + +#define SXE_FNAVTCPM_DPORTM_SHIFT 16 + +#define SXE_FNAVM_VLANID 0x00000001 +#define SXE_FNAVM_VLANP 0x00000002 +#define SXE_FNAVM_POOL 0x00000004 +#define SXE_FNAVM_L4P 0x00000008 +#define SXE_FNAVM_FLEX 0x00000010 +#define SXE_FNAVM_DIPv6 0x00000020 + +#define SXE_FNAVPORT_DESTINATION_SHIFT 16 +#define SXE_FNAVVLAN_FLEX_SHIFT 16 +#define SXE_FNAVHASH_SIG_SW_INDEX_SHIFT 16 + +#define SXE_FNAVCMD_CMD_MASK 0x00000003 +#define SXE_FNAVCMD_CMD_ADD_FLOW 0x00000001 +#define SXE_FNAVCMD_CMD_REMOVE_FLOW 0x00000002 +#define SXE_FNAVCMD_CMD_QUERY_REM_FILT 0x00000003 +#define SXE_FNAVCMD_FILTER_VALID 0x00000004 +#define SXE_FNAVCMD_FILTER_UPDATE 0x00000008 +#define SXE_FNAVCMD_IPv6DMATCH 0x00000010 +#define SXE_FNAVCMD_L4TYPE_UDP 0x00000020 +#define SXE_FNAVCMD_L4TYPE_TCP 0x00000040 +#define SXE_FNAVCMD_L4TYPE_SCTP 0x00000060 +#define SXE_FNAVCMD_IPV6 0x00000080 +#define SXE_FNAVCMD_CLEARHT 0x00000100 +#define SXE_FNAVCMD_DROP 0x00000200 +#define SXE_FNAVCMD_INT 0x00000400 +#define SXE_FNAVCMD_LAST 0x00000800 +#define SXE_FNAVCMD_COLLISION 0x00001000 +#define SXE_FNAVCMD_QUEUE_EN 0x00008000 +#define SXE_FNAVCMD_FLOW_TYPE_SHIFT 5 +#define SXE_FNAVCMD_RX_QUEUE_SHIFT 16 +#define SXE_FNAVCMD_RX_TUNNEL_FILTER_SHIFT 23 +#define SXE_FNAVCMD_VT_POOL_SHIFT 24 +#define SXE_FNAVCMD_CMD_POLL 10 +#define SXE_FNAVCMD_TUNNEL_FILTER 0x00800000 + + +#define SXE_LXOFFRXCNT 0x041A8 +#define SXE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) + +#define SXE_EPC_GPRC 0x050E0 +#define SXE_RXDGPC 0x02F50 +#define SXE_RXDGBCL 0x02F54 +#define SXE_RXDGBCH 0x02F58 +#define SXE_RXDDGPC 0x02F5C +#define SXE_RXDDGBCL 0x02F60 +#define SXE_RXDDGBCH 0x02F64 +#define SXE_RXLPBKGPC 0x02F68 +#define SXE_RXLPBKGBCL 0x02F6C +#define SXE_RXLPBKGBCH 0x02F70 +#define SXE_RXDLPBKGPC 0x02F74 +#define SXE_RXDLPBKGBCL 0x02F78 +#define SXE_RXDLPBKGBCH 0x02F7C + +#define SXE_RXTPCIN 0x02F88 +#define SXE_RXTPCOUT 0x02F8C +#define SXE_RXPRDDC 0x02F9C + +#define 
SXE_TXDGPC 0x087A0 +#define SXE_TXDGBCL 0x087A4 +#define SXE_TXDGBCH 0x087A8 +#define SXE_TXSWERR 0x087B0 +#define SXE_TXSWITCH 0x087B4 +#define SXE_TXREPEAT 0x087B8 +#define SXE_TXDESCERR 0x087BC +#define SXE_MNGPRC 0x040B4 +#define SXE_MNGPDC 0x040B8 +#define SXE_RQSMR(_i) (0x02300 + ((_i) * 4)) +#define SXE_TQSM(_i) (0x08600 + ((_i) * 4)) +#define SXE_QPRC(_i) (0x01030 + ((_i) * 0x40)) +#define SXE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) +#define SXE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) + + +#define SXE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) +#define SXE_QPTC(_i) (0x08680 + ((_i) * 0x4)) +#define SXE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) +#define SXE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) +#define SXE_SSVPC 0x08780 +#define SXE_MNGPTC 0x0CF90 +#define SXE_MPC(_i) (0x03FA0 + ((_i) * 4)) + +#define SXE_DBUDRTCICNT(_i) (0x03C6C + ((_i) * 4)) +#define SXE_DBUDRTCOCNT(_i) (0x03C8C + ((_i) * 4)) +#define SXE_DBUDRBDPCNT(_i) (0x03D20 + ((_i) * 4)) +#define SXE_DBUDREECNT(_i) (0x03D40 + ((_i) * 4)) +#define SXE_DBUDROFPCNT(_i) (0x03D60 + ((_i) * 4)) +#define SXE_DBUDTTCICNT(_i) (0x0CE54 + ((_i) * 4)) +#define SXE_DBUDTTCOCNT(_i) (0x0CE74 + ((_i) * 4)) + + + +#define SXE_WUC 0x05800 +#define SXE_WUFC 0x05808 +#define SXE_WUS 0x05810 +#define SXE_IP6AT(_i) (0x05880 + ((_i) * 4)) + + +#define SXE_IP6AT_CNT 4 + + +#define SXE_WUC_PME_EN 0x00000002 +#define SXE_WUC_PME_STATUS 0x00000004 +#define SXE_WUC_WKEN 0x00000010 +#define SXE_WUC_APME 0x00000020 + + +#define SXE_WUFC_LNKC 0x00000001 +#define SXE_WUFC_MAG 0x00000002 +#define SXE_WUFC_EX 0x00000004 +#define SXE_WUFC_MC 0x00000008 +#define SXE_WUFC_BC 0x00000010 +#define SXE_WUFC_ARP 0x00000020 +#define SXE_WUFC_IPV4 0x00000040 +#define SXE_WUFC_IPV6 0x00000080 +#define SXE_WUFC_MNG 0x00000100 + + + + +#define SXE_TSCTRL 0x14800 +#define SXE_TSES 0x14804 +#define SXE_TSYNCTXCTL 0x14810 +#define SXE_TSYNCRXCTL 0x14820 +#define SXE_RXSTMPL 0x14824 +#define SXE_RXSTMPH 0x14828 +#define SXE_SYSTIML 0x14840 +#define SXE_SYSTIMM 0x14844 +#define SXE_SYSTIMH 0x14848 +#define SXE_TIMADJL 0x14850 +#define SXE_TIMADJH 0x14854 +#define SXE_TIMINC 0x14860 + + +#define SXE_TSYNCTXCTL_TXTT 0x0001 +#define SXE_TSYNCTXCTL_TEN 0x0010 + + +#define SXE_TSYNCRXCTL_RXTT 0x0001 +#define SXE_TSYNCRXCTL_REN 0x0010 + + +#define SXE_TSCTRL_TSSEL 0x00001 +#define SXE_TSCTRL_TSEN 0x00002 +#define SXE_TSCTRL_VER_2 0x00010 +#define SXE_TSCTRL_ONESTEP 0x00100 +#define SXE_TSCTRL_CSEN 0x01000 +#define SXE_TSCTRL_PTYP_ALL 0x00C00 +#define SXE_TSCTRL_L4_UNICAST 0x08000 + + +#define SXE_TSES_TXES 0x00200 +#define SXE_TSES_RXES 0x00800 +#define SXE_TSES_TXES_V1_SYNC 0x00000 +#define SXE_TSES_TXES_V1_DELAY_REQ 0x00100 +#define SXE_TSES_TXES_V1_ALL 0x00200 +#define SXE_TSES_RXES_V1_SYNC 0x00000 +#define SXE_TSES_RXES_V1_DELAY_REQ 0x00400 +#define SXE_TSES_RXES_V1_ALL 0x00800 +#define SXE_TSES_TXES_V2_ALL 0x00200 +#define SXE_TSES_RXES_V2_ALL 0x00800 + +#define SXE_IV_SNS 0 +#define SXE_IV_NS 8 +#define SXE_INCPD 0 +#define SXE_BASE_INCVAL 8 + + +#define SXE_VT_CTL 0x051B0 +#define SXE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) + +#define SXE_PFMBICR(_i) (0x00710 + (4 * (_i))) +#define SXE_VFLRE(i) ((i & 1)? 
0x001C0 : 0x00600) +#define SXE_VFLREC(i) (0x00700 + (i * 4)) +#define SXE_VFRE(_i) (0x051E0 + ((_i) * 4)) +#define SXE_VFTE(_i) (0x08110 + ((_i) * 4)) +#define SXE_QDE (0x02F04) +#define SXE_SPOOF(_i) (0x08200 + (_i) * 4) +#define SXE_PFDTXGSWC 0x08220 +#define SXE_VMVIR(_i) (0x08000 + ((_i) * 4)) +#define SXE_VMOLR(_i) (0x0F000 + ((_i) * 4)) +#define SXE_VLVF(_i) (0x0F100 + ((_i) * 4)) +#define SXE_VLVFB(_i) (0x0F200 + ((_i) * 4)) +#define SXE_MRCTL(_i) (0x0F600 + ((_i) * 4)) +#define SXE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) +#define SXE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define SXE_VMECM(_i) (0x08790 + ((_i) * 4)) +#define SXE_PFMBMEM(_i) (0x13000 + (64 * (_i))) + + +#define SXE_VMOLR_CNT 64 +#define SXE_VLVF_CNT 64 +#define SXE_VLVFB_CNT 128 +#define SXE_MRCTL_CNT 4 +#define SXE_VMRVLAN_CNT 8 +#define SXE_VMRVM_CNT 8 +#define SXE_SPOOF_CNT 8 +#define SXE_VMVIR_CNT 64 +#define SXE_VFRE_CNT 2 + + +#define SXE_VMVIR_VLANA_MASK 0xC0000000 +#define SXE_VMVIR_VLAN_VID_MASK 0x00000FFF +#define SXE_VMVIR_VLAN_UP_MASK 0x0000E000 + + +#define SXE_MRCTL_VPME 0x01 + +#define SXE_MRCTL_UPME 0x02 + +#define SXE_MRCTL_DPME 0x04 + +#define SXE_MRCTL_VLME 0x08 + + +#define SXE_VT_CTL_DIS_DEFPL 0x20000000 +#define SXE_VT_CTL_REPLEN 0x40000000 +#define SXE_VT_CTL_VT_ENABLE 0x00000001 +#define SXE_VT_CTL_POOL_SHIFT 7 +#define SXE_VT_CTL_POOL_MASK (0x3F << SXE_VT_CTL_POOL_SHIFT) + + +#define SXE_PFMAILBOX_STS 0x00000001 +#define SXE_PFMAILBOX_ACK 0x00000002 +#define SXE_PFMAILBOX_VFU 0x00000004 +#define SXE_PFMAILBOX_PFU 0x00000008 +#define SXE_PFMAILBOX_RVFU 0x00000010 + + +#define SXE_PFMBICR_VFREQ 0x00000001 +#define SXE_PFMBICR_VFACK 0x00010000 +#define SXE_PFMBICR_VFREQ_MASK 0x0000FFFF +#define SXE_PFMBICR_VFACK_MASK 0xFFFF0000 + + +#define SXE_QDE_ENABLE (0x00000001) +#define SXE_QDE_HIDE_VLAN (0x00000002) +#define SXE_QDE_IDX_MASK (0x00007F00) +#define SXE_QDE_IDX_SHIFT (8) +#define SXE_QDE_WRITE (0x00010000) + + + +#define SXE_SPOOF_VLAN_SHIFT (8) + + +#define SXE_PFDTXGSWC_VT_LBEN 0x1 + + +#define SXE_VMVIR_VLANA_DEFAULT 0x40000000 +#define SXE_VMVIR_VLANA_NEVER 0x80000000 + + +#define SXE_VMOLR_UPE 0x00400000 +#define SXE_VMOLR_VPE 0x00800000 +#define SXE_VMOLR_AUPE 0x01000000 +#define SXE_VMOLR_ROMPE 0x02000000 +#define SXE_VMOLR_ROPE 0x04000000 +#define SXE_VMOLR_BAM 0x08000000 +#define SXE_VMOLR_MPE 0x10000000 + + +#define SXE_VLVF_VIEN 0x80000000 +#define SXE_VLVF_ENTRIES 64 +#define SXE_VLVF_VLANID_MASK 0x00000FFF + + +#define SXE_HDC_HOST_BASE 0x16000 +#define SXE_HDC_SW_LK (SXE_HDC_HOST_BASE + 0x00) +#define SXE_HDC_PF_LK (SXE_HDC_HOST_BASE + 0x04) +#define SXE_HDC_SW_OV (SXE_HDC_HOST_BASE + 0x08) +#define SXE_HDC_FW_OV (SXE_HDC_HOST_BASE + 0x0C) +#define SXE_HDC_PACKET_HEAD0 (SXE_HDC_HOST_BASE + 0x10) + +#define SXE_HDC_PACKET_DATA0 (SXE_HDC_HOST_BASE + 0x20) + + +#define SXE_HDC_MSI_STATUS_REG 0x17000 +#define SXE_FW_STATUS_REG 0x17004 +#define SXE_DRV_STATUS_REG 0x17008 +#define SXE_FW_HDC_STATE_REG 0x1700C +#define SXE_R0_MAC_ADDR_RAL 0x17010 +#define SXE_R0_MAC_ADDR_RAH 0x17014 +#define SXE_CRC_STRIP_REG 0x17018 + + +#define SXE_HDC_SW_LK_BIT 0x0001 +#define SXE_HDC_PF_LK_BIT 0x0003 +#define SXE_HDC_SW_OV_BIT 0x0001 +#define SXE_HDC_FW_OV_BIT 0x0001 +#define SXE_HDC_RELEASE_SW_LK 0x0000 + +#define SXE_HDC_LEN_TO_REG(n) (n - 1) +#define SXE_HDC_LEN_FROM_REG(n) (n + 1) + + +#define SXE_RX_PKT_BUF_SIZE_SHIFT 10 +#define SXE_TX_PKT_BUF_SIZE_SHIFT 10 + +#define SXE_RXIDX_TBL_SHIFT 1 +#define SXE_RXTXIDX_IPS_EN 0x00000001 +#define SXE_RXTXIDX_IDX_SHIFT 3 +#define SXE_RXTXIDX_READ 0x40000000 
+#define SXE_RXTXIDX_WRITE 0x80000000 + + +#define SXE_KEEP_CRC_EN 0x00000001 + + +#define SXE_VMD_CTL 0x0581C + + +#define SXE_VMD_CTL_POOL_EN 0x00000001 +#define SXE_VMD_CTL_POOL_FILTER 0x00000002 + + +#define SXE_FLCTRL 0x14300 +#define SXE_PFCTOP 0x14304 +#define SXE_FCTTV0 0x14310 +#define SXE_FCTTV(_i) (SXE_FCTTV0 + ((_i) * 4)) +#define SXE_FCRTV 0x14320 +#define SXE_TFCS 0x14324 + + +#define SXE_FCTRL_TFCE_MASK 0x0018 +#define SXE_FCTRL_TFCE_LFC_EN 0x0008 +#define SXE_FCTRL_TFCE_PFC_EN 0x0010 +#define SXE_FCTRL_TFCE_DPF_EN 0x0020 +#define SXE_FCTRL_RFCE_MASK 0x0300 +#define SXE_FCTRL_RFCE_LFC_EN 0x0100 +#define SXE_FCTRL_RFCE_PFC_EN 0x0200 + +#define SXE_FCTRL_TFCE_FCEN_MASK 0x00FF0000 +#define SXE_FCTRL_TFCE_XONE_MASK 0xFF000000 + + +#define SXE_PFCTOP_FCT 0x8808 +#define SXE_PFCTOP_FCOP_MASK 0xFFFF0000 +#define SXE_PFCTOP_FCOP_PFC 0x01010000 +#define SXE_PFCTOP_FCOP_LFC 0x00010000 + + +#define SXE_COMCTRL 0x14400 +#define SXE_PCCTRL 0x14404 +#define SXE_LPBKCTRL 0x1440C +#define SXE_MAXFS 0x14410 +#define SXE_SACONH 0x14420 +#define SXE_SACONL 0x14424 +#define SXE_VLANCTRL 0x14430 +#define SXE_VLANID 0x14434 +#define SXE_LINKS 0x14454 +#define SXE_FPGA_SDS_STS 0x14704 +#define SXE_MSCA 0x14500 +#define SXE_MSCD 0x14504 + +#define SXE_HLREG0 0x04240 +#define SXE_MFLCN 0x04294 +#define SXE_MACC 0x04330 + +#define SXE_PCS1GLSTA 0x0420C +#define SXE_MFLCN 0x04294 +#define SXE_PCS1GANA 0x04850 +#define SXE_PCS1GANLP 0x04854 + + +#define SXE_LPBKCTRL_EN 0x00000001 + + +#define SXE_MAC_ADDR_SACONH_SHIFT 32 +#define SXE_MAC_ADDR_SACONL_MASK 0xFFFFFFFF + + +#define SXE_PCS1GLSTA_AN_COMPLETE 0x10000 +#define SXE_PCS1GLSTA_AN_PAGE_RX 0x20000 +#define SXE_PCS1GLSTA_AN_TIMED_OUT 0x40000 +#define SXE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +#define SXE_PCS1GLSTA_AN_ERROR_RWS 0x100000 + +#define SXE_PCS1GANA_SYM_PAUSE 0x100 +#define SXE_PCS1GANA_ASM_PAUSE 0x80 + + +#define SXE_LKSTS_PCS_LKSTS_UP 0x00000001 +#define SXE_LINK_UP_TIME 90 +#define SXE_AUTO_NEG_TIME 45 + + +#define SXE_MSCA_NP_ADDR_MASK 0x0000FFFF +#define SXE_MSCA_NP_ADDR_SHIFT 0 +#define SXE_MSCA_DEV_TYPE_MASK 0x001F0000 +#define SXE_MSCA_DEV_TYPE_SHIFT 16 +#define SXE_MSCA_PHY_ADDR_MASK 0x03E00000 +#define SXE_MSCA_PHY_ADDR_SHIFT 21 +#define SXE_MSCA_OP_CODE_MASK 0x0C000000 +#define SXE_MSCA_OP_CODE_SHIFT 26 +#define SXE_MSCA_ADDR_CYCLE 0x00000000 +#define SXE_MSCA_WRITE 0x04000000 +#define SXE_MSCA_READ 0x0C000000 +#define SXE_MSCA_READ_AUTOINC 0x08000000 +#define SXE_MSCA_ST_CODE_MASK 0x30000000 +#define SXE_MSCA_ST_CODE_SHIFT 28 +#define SXE_MSCA_NEW_PROTOCOL 0x00000000 +#define SXE_MSCA_OLD_PROTOCOL 0x10000000 +#define SXE_MSCA_BYPASSRA_C45 0x40000000 +#define SXE_MSCA_MDI_CMD_ON_PROG 0x80000000 + + +#define MDIO_MSCD_RDATA_LEN 16 +#define MDIO_MSCD_RDATA_SHIFT 16 + + +#define SXE_CRCERRS 0x14A04 +#define SXE_ERRBC 0x14A10 +#define SXE_RLEC 0x14A14 +#define SXE_PRC64 0x14A18 +#define SXE_PRC127 0x14A1C +#define SXE_PRC255 0x14A20 +#define SXE_PRC511 0x14A24 +#define SXE_PRC1023 0x14A28 +#define SXE_PRC1522 0x14A2C +#define SXE_BPRC 0x14A30 +#define SXE_MPRC 0x14A34 +#define SXE_GPRC 0x14A38 +#define SXE_GORCL 0x14A3C +#define SXE_GORCH 0x14A40 +#define SXE_RUC 0x14A44 +#define SXE_RFC 0x14A48 +#define SXE_ROC 0x14A4C +#define SXE_RJC 0x14A50 +#define SXE_TORL 0x14A54 +#define SXE_TORH 0x14A58 +#define SXE_TPR 0x14A5C +#define SXE_PRCPF(_i) (0x14A60 + ((_i) * 4)) +#define SXE_GPTC 0x14B00 +#define SXE_GOTCL 0x14B04 +#define SXE_GOTCH 0x14B08 +#define SXE_TPT 0x14B0C +#define SXE_PTC64 0x14B10 +#define SXE_PTC127 0x14B14 +#define 
SXE_PTC255 0x14B18 +#define SXE_PTC511 0x14B1C +#define SXE_PTC1023 0x14B20 +#define SXE_PTC1522 0x14B24 +#define SXE_MPTC 0x14B28 +#define SXE_BPTC 0x14B2C +#define SXE_PFCT(_i) (0x14B30 + ((_i) * 4)) + +#define SXE_MACCFG 0x0CE04 +#define SXE_MACCFG_PAD_EN 0x00000001 + + +#define SXE_COMCTRL_TXEN 0x0001 +#define SXE_COMCTRL_RXEN 0x0002 +#define SXE_COMCTRL_EDSEL 0x0004 +#define SXE_COMCTRL_SPEED_1G 0x0200 +#define SXE_COMCTRL_SPEED_10G 0x0300 + + +#define SXE_PCCTRL_TXCE 0x0001 +#define SXE_PCCTRL_RXCE 0x0002 +#define SXE_PCCTRL_PEN 0x0100 +#define SXE_PCCTRL_PCSC_ALL 0x30000 + + +#define SXE_MAXFS_TFSEL 0x0001 +#define SXE_MAXFS_RFSEL 0x0002 +#define SXE_MAXFS_MFS_MASK 0xFFFF0000 +#define SXE_MAXFS_MFS 0x40000000 +#define SXE_MAXFS_MFS_SHIFT 16 + + +#define SXE_LINKS_UP 0x00000001 + +#define SXE_10G_LINKS_DOWN 0x00000006 + + +#define SXE_LINK_SPEED_UNKNOWN 0 +#define SXE_LINK_SPEED_10_FULL 0x0002 +#define SXE_LINK_SPEED_100_FULL 0x0008 +#define SXE_LINK_SPEED_1GB_FULL 0x0020 +#define SXE_LINK_SPEED_10GB_FULL 0x0080 + + +#define SXE_HLREG0_TXCRCEN 0x00000001 +#define SXE_HLREG0_RXCRCSTRP 0x00000002 +#define SXE_HLREG0_JUMBOEN 0x00000004 +#define SXE_HLREG0_TXPADEN 0x00000400 +#define SXE_HLREG0_TXPAUSEEN 0x00001000 +#define SXE_HLREG0_RXPAUSEEN 0x00004000 +#define SXE_HLREG0_LPBK 0x00008000 +#define SXE_HLREG0_MDCSPD 0x00010000 +#define SXE_HLREG0_CONTMDC 0x00020000 +#define SXE_HLREG0_CTRLFLTR 0x00040000 +#define SXE_HLREG0_PREPEND 0x00F00000 +#define SXE_HLREG0_PRIPAUSEEN 0x01000000 +#define SXE_HLREG0_RXPAUSERECDA 0x06000000 +#define SXE_HLREG0_RXLNGTHERREN 0x08000000 +#define SXE_HLREG0_RXPADSTRIPEN 0x10000000 + +#define SXE_MFLCN_PMCF 0x00000001 +#define SXE_MFLCN_DPF 0x00000002 +#define SXE_MFLCN_RPFCE 0x00000004 +#define SXE_MFLCN_RFCE 0x00000008 +#define SXE_MFLCN_RPFCE_MASK 0x00000FF4 +#define SXE_MFLCN_RPFCE_SHIFT 4 + +#define SXE_MACC_FLU 0x00000001 +#define SXE_MACC_FSV_10G 0x00030000 +#define SXE_MACC_FS 0x00040000 + +#define SXE_DEFAULT_FCPAUSE 0xFFFF + + +#define SXE_SAQF(_i) (0x0E000 + ((_i) * 4)) +#define SXE_DAQF(_i) (0x0E200 + ((_i) * 4)) +#define SXE_SDPQF(_i) (0x0E400 + ((_i) * 4)) +#define SXE_FTQF(_i) (0x0E600 + ((_i) * 4)) +#define SXE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) + +#define SXE_MAX_FTQF_FILTERS 128 +#define SXE_FTQF_PROTOCOL_MASK 0x00000003 +#define SXE_FTQF_PROTOCOL_TCP 0x00000000 +#define SXE_FTQF_PROTOCOL_UDP 0x00000001 +#define SXE_FTQF_PROTOCOL_SCTP 2 +#define SXE_FTQF_PRIORITY_MASK 0x00000007 +#define SXE_FTQF_PRIORITY_SHIFT 2 +#define SXE_FTQF_POOL_MASK 0x0000003F +#define SXE_FTQF_POOL_SHIFT 8 +#define SXE_FTQF_5TUPLE_MASK_MASK 0x0000001F +#define SXE_FTQF_5TUPLE_MASK_SHIFT 25 +#define SXE_FTQF_SOURCE_ADDR_MASK 0x1E +#define SXE_FTQF_DEST_ADDR_MASK 0x1D +#define SXE_FTQF_SOURCE_PORT_MASK 0x1B +#define SXE_FTQF_DEST_PORT_MASK 0x17 +#define SXE_FTQF_PROTOCOL_COMP_MASK 0x0F +#define SXE_FTQF_POOL_MASK_EN 0x40000000 +#define SXE_FTQF_QUEUE_ENABLE 0x80000000 + +#define SXE_SDPQF_DSTPORT 0xFFFF0000 +#define SXE_SDPQF_DSTPORT_SHIFT 16 +#define SXE_SDPQF_SRCPORT 0x0000FFFF + +#define SXE_L34T_IMIR_SIZE_BP 0x00001000 +#define SXE_L34T_IMIR_RESERVE 0x00080000 +#define SXE_L34T_IMIR_LLI 0x00100000 +#define SXE_L34T_IMIR_QUEUE 0x0FE00000 +#define SXE_L34T_IMIR_QUEUE_SHIFT 21 + +#define SXE_VMTXSW(_i) (0x05180 + ((_i) * 4)) +#define SXE_VMTXSW_REGISTER_COUNT 2 + +#define SXE_TXSTMP_SEL 0x14510 +#define SXE_TXSTMP_VAL 0x1451c + +#define SXE_TXTS_MAGIC0 0x005a005900580057 +#define SXE_TXTS_MAGIC1 0x005e005d005c005b + +#endif diff --git 
a/drivers/net/ethernet/linkdata/sxe/include/sxe_drv_type.h b/drivers/net/ethernet/linkdata/sxe/include/sxe_drv_type.h new file mode 100644 index 000000000000..69505651377d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/include/sxe_drv_type.h @@ -0,0 +1,20 @@ + +#ifndef __SXE_DRV_TYPEDEF_H__ +#define __SXE_DRV_TYPEDEF_H__ + +#ifdef SXE_DPDK +#include "sxe_types.h" +#ifndef bool +typedef _Bool bool; +#endif +#else +#include +#endif + +typedef u8 U8; +typedef u16 U16; +typedef u32 U32; +typedef u64 U64; +typedef bool BOOL; + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/include/sxe_version.h b/drivers/net/ethernet/linkdata/sxe/include/sxe_version.h new file mode 100644 index 000000000000..47c08a1518e1 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/include/sxe_version.h @@ -0,0 +1,28 @@ +#ifndef __SXE_VER_H__ +#define __SXE_VER_H__ + +#define SXE_VERSION "1.3.0.12" +#define SXE_COMMIT_ID "f0e5e96" +#define SXE_BRANCH "develop/rc/sagitta-1.3.0_B012" +#define SXE_BUILD_TIME "2024-08-27 15:56:18" + + +#define SXE_DRV_NAME "sxe" +#define SXEVF_DRV_NAME "sxevf" +#define SXE_DRV_LICENSE "GPL v2" +#define SXE_DRV_AUTHOR "sxe" +#define SXEVF_DRV_AUTHOR "sxevf" +#define SXE_DRV_DESCRIPTION "sxe driver" +#define SXEVF_DRV_DESCRIPTION "sxevf driver" + + +#define SXE_FW_NAME "soc" +#define SXE_FW_ARCH "arm32" + +#ifndef PS3_CFG_RELEASE +#define PS3_SXE_FW_BUILD_MODE "debug" +#else +#define PS3_SXE_FW_BUILD_MODE "release" +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe.h new file mode 100644 index 000000000000..368d0fbd6795 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe.h @@ -0,0 +1,400 @@ + +#ifndef __SXE_H__ +#define __SXE_H__ + +#include +#include +#include +#include + +#include +#include +#include + +#include "sxe_log.h" +#include "sxe_hw.h" +#include "sxe_irq.h" +#include "sxe_phy.h" +#include "sxe_monitor.h" +#include "sxe_ipsec.h" +#include "sxe_dcb.h" +#include "sxe_errno.h" +#include "drv_msg.h" +#include "sxe_compat.h" + +#define SXE_ETH_DEAD_LOAD (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN) +#define SXE_MAX_JUMBO_FRAME_SIZE 9728 +#define DEV_NAME_LEN 16 + +#define CHAR_BITS (8) + +#define SXE_HZ_TRANSTO_MS 1000 + +#define PCI_BDF_DEV_SHIFT (3) +#define PCI_BDF_DEV_MASK (0x1F) +#define PCI_BDF_FUNC_MASK (0x7) + +#ifdef SXE_TEST +#define SXE_MAX_MACVLANS 3 +#else +#define SXE_MAX_MACVLANS 63 +#endif + +#define SXE_KFREE(addr) \ + do { \ + if (addr != NULL) { \ + kfree(addr); \ + addr = NULL; \ + } \ + } while(0) + +enum adapter_cap { + SXE_DCB_ENABLE = BIT(0), + SXE_SRIOV_ENABLE = BIT(1), + SXE_FNAV_SAMPLE_ENABLE = BIT(2), + SXE_FNAV_SPECIFIC_ENABLE = BIT(3), + SXE_SRIOV_DCB_ENABLE = (SXE_DCB_ENABLE | SXE_SRIOV_ENABLE), + SXE_LRO_ENABLE = BIT(4), + SXE_RSS_FIELD_IPV4_UDP = BIT(5), + SXE_RSS_FIELD_IPV6_UDP = BIT(6), + SXE_RX_LEGACY = BIT(7), + SXE_RX_HWTSTAMP_ENABLED = BIT(8), + SXE_MSI_ENABLED = BIT(9), + SXE_MSIX_ENABLED = BIT(10), + SXE_VLAN_PROMISC = BIT(11), + SXE_LRO_CAPABLE = BIT(12), + SXE_RSS_ENABLE = BIT(13), + SXE_MACVLAN_ENABLE = BIT(14), + SXE_1588V2_ONE_STEP = BIT(15), + SXE_PTP_PPS_ENABLED = BIT(16), + SXE_RX_HWTSTAMP_IN_REGISTER = BIT(17), + SXE_TPH_CAPABLE = BIT(18), + SXE_TPH_ENABLE = BIT(19), +#ifdef SXE_IPSEC_CONFIGURE + SXE_IPSEC_ENABLED = BIT(20), + SXE_VF_IPSEC_ENABLED = BIT(21), +#endif +}; + +enum sxe_nic_state { + SXE_RESETTING, + SXE_TESTING, + SXE_DOWN, + SXE_DISABLED, + SXE_REMOVING, + SXE_PTP_RUNNING, + SXE_PTP_TX_IN_PROGRESS, + SXE_IN_SFP_INIT, + 
SXE_SFP_MULTI_SPEED_SETTING, +}; + +struct sxe_sw_stats { + u64 tx_busy; + u64 non_eop_descs; + u64 lro_total_count; + u64 lro_total_flush; + u64 fnav_overflow; + u64 reset_work_trigger_cnt; + u64 restart_queue; + u64 hw_csum_rx_error; + u64 alloc_rx_page; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 tx_hwtstamp_timeouts; + u64 tx_hwtstamp_skipped; + u64 rx_hwtstamp_cleared; + u64 tx_ipsec; + u64 rx_ipsec; + u64 link_state_change_cnt; +}; + +struct sxe_stats_info { + struct sxe_sw_stats sw; + struct sxe_mac_stats hw; + struct mutex stats_mutex; +}; + +struct sxe_macvlan { + struct net_device *netdev; + u32 tx_ring_offset; + u32 rx_ring_offset; + s32 pool; +}; + +struct sxe_fnav_rule_node { + struct hlist_node node; + union sxe_fnav_rule_info rule_info; + u16 sw_idx; + u64 ring_cookie; +}; + +struct sxe_fnav_context { + u32 rules_table_size; + + u32 sample_rate; + spinlock_t sample_lock; + u32 sample_rules_cnt; + time64_t fdir_overflow_time; + bool is_sample_table_overflowed; + DECLARE_HASHTABLE(sample_list, 13); + + spinlock_t specific_lock; + u32 rule_cnt; + struct hlist_head rules_list; + union sxe_fnav_rule_info rules_mask; +}; + + +struct sxe_vf_uc_addr_list{ + struct list_head list; + u8 vf_idx; + bool free; + bool is_macvlan; + u8 uc_addr[ETH_ALEN]; +}; + +struct sxe_vf_info { + u8 mac_addr[ETH_ALEN]; + u16 mc_hash[SXE_VF_MC_ENTRY_NUM_MAX]; + u8 mc_hash_used; + u16 pf_vlan; + u16 pf_qos; + u8 cast_mode; + u8 trusted :1; + u8 is_ready :1; + u8 spoof_chk_enabled :1; + u8 rss_query_enabled :1; + u8 mac_from_pf :1; + u8 reserved :3; + u16 tx_rate; + s32 link_enable; +#ifdef HAVE_NDO_SET_VF_LINK_STATE + s32 link_state; +#endif + struct pci_dev *vf_dev; + u32 mbx_version; +}; + +struct sxe_virtual_context { + u8 num_vfs; + u16 bridge_mode; + u32 mbps_link_speed; + bool is_rate_set; + struct sxe_vf_uc_addr_list head; + struct sxe_vf_uc_addr_list *vf_uc_list; + struct sxe_vf_info *vf_info; + + u32 err_refcount; + spinlock_t vfs_lock; + + DECLARE_BITMAP(pf_pool_bitmap, SXE_MAX_MACVLANS + 1); +}; + +struct sxe_ptp_context { + struct cyclecounter hw_cc; + struct timecounter hw_tc; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; + spinlock_t ptp_timer_lock; + void (*ptp_setup_spp)(struct sxe_adapter *); + u32 tx_hwtstamp_sec; + u32 tx_hwtstamp_nsec; +}; + +struct sxe_dcb_context { +#ifdef SXE_DCB_CONFIGURE + struct ieee_pfc *ieee_pfc; + struct ieee_ets *ieee_ets; +#endif + struct sxe_dcb_cee_config cee_cfg; + struct sxe_dcb_cee_config cee_temp_cfg; + u8 cee_cfg_bitmap; + u8 hw_tcs; + u8 dcbx_cap; + u8 default_up; + enum sxe_fc_mode last_lfc_mode; +}; + +struct sxe_uc_addr_table { + u8 addr[ETH_ALEN]; + u16 pool; + unsigned long state; +}; + +struct sxe_mac_filter_context { + u8 cur_mac_addr[ETH_ALEN]; + u8 def_mac_addr[ETH_ALEN]; + struct sxe_uc_addr_table *uc_addr_table; + + u32 mc_hash_table[SXE_MTA_ENTRY_NUM_MAX]; + u32 mc_hash_table_used; +}; + +struct sxe_vlan_context { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 vlan_table_size; +}; + +struct sxe_cdev_info { + struct cdev cdev; + dev_t dev_no; + struct device *device; +}; + +struct sxe_self_test_context { + u32 icr; + struct sxe_ring tx_ring; + struct sxe_ring rx_ring; +}; + +struct sxe_hdc_context { + struct completion sync_done; + struct work_struct time_sync_work; + u16 
time_sync_failed; +}; + +struct sxe_fw_info { + u8 fw_version[SXE_VERSION_LEN]; +}; + +struct sxe_adapter { + char dev_name[DEV_NAME_LEN]; + struct net_device *netdev; + struct pci_dev *pdev; + struct sxe_hw hw; + + u32 cap; + u32 cap2; + + unsigned long state; + struct sxe_link_info link; + + u16 msg_enable; + + struct sxe_ring_feature ring_f; + struct sxe_pool_feature pool_f; + + struct sxe_ring_context rx_ring_ctxt; + struct sxe_ring_context tx_ring_ctxt; + struct sxe_ring_context xdp_ring_ctxt; + struct sxe_monitor_context monitor_ctxt; + + struct sxe_irq_context irq_ctxt; + + struct sxe_fnav_context fnav_ctxt; + + struct sxe_virtual_context vt_ctxt; + +#ifdef SXE_IPSEC_CONFIGURE + struct sxe_ipsec_context ipsec; +#endif + + struct bpf_prog *xdp_prog; +#ifdef HAVE_AF_XDP_ZERO_COPY + unsigned long *af_xdp_zc_qps; +#endif + + u32 *rss_key; + u8 rss_indir_tbl[SXE_MAX_RETA_ENTRIES]; + + struct sxe_ptp_context ptp_ctxt; + + struct sxe_dcb_context dcb_ctxt; + + struct sxe_vlan_context vlan_ctxt; + + struct sxe_mac_filter_context mac_filter_ctxt; + + struct sxe_stats_info stats; + + struct sxe_fw_info fw_info; + + struct dentry *debugfs_entries; + + struct sxe_phy_context phy_ctxt; + + struct sxe_self_test_context test_ctxt; + + struct sxe_cdev_info cdev_info; + struct sxe_hdc_context hdc_ctxt; + + u16 bridge_mode; + +#ifdef SXE_WOL_CONFIGURE + u32 wol; +#endif +}; + +struct sxe_fnav_sample_work_info { + struct work_struct work_st; + struct sxe_adapter *adapter; + u64 hash; +}; + +struct sxe_fnav_sample_filter { + struct hlist_node hlist; + u32 hash; +}; + +static inline u8 sxe_dcb_tc_get(struct sxe_adapter *adapter) +{ + return adapter->dcb_ctxt.hw_tcs; +} + +static inline void sxe_dcb_tc_set(struct sxe_adapter *adapter, u8 tcs) +{ + adapter->dcb_ctxt.hw_tcs = tcs; +} + +static inline u8 sxe_rxtx_pkt_buf_max(struct sxe_adapter *adapter) +{ + return (adapter->cap & SXE_DCB_ENABLE) ? 
SXE_PKG_BUF_NUM_MAX : 1; +} + +struct workqueue_struct *sxe_workqueue_get(void); + +void sxe_fw_version_get(struct sxe_adapter *adapter); + +s32 sxe_ring_irq_init(struct sxe_adapter *adapter); + +void sxe_ring_irq_exit(struct sxe_adapter *adapter); + +s32 sxe_hw_reset(struct sxe_adapter *adapter); + +void sxe_hw_start(struct sxe_hw *hw); + +static inline void stats_lock(struct sxe_adapter *adapter) +{ + mutex_lock(&adapter->stats.stats_mutex); + return; +} + +static inline void stats_unlock(struct sxe_adapter *adapter) +{ + mutex_unlock(&adapter->stats.stats_mutex); + return; +} + +static inline void carrier_lock(struct sxe_adapter *adapter) +{ + mutex_lock(&adapter->link.carrier_mutex); + return; +} + +static inline void carrier_unlock(struct sxe_adapter *adapter) +{ + mutex_unlock(&adapter->link.carrier_mutex); + return; +} + +void sxe_tph_update(struct sxe_irq_data *irq_data); + +void sxe_tph_setup(struct sxe_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_csum.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_csum.c new file mode 100644 index 000000000000..ef397b89aa38 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_csum.c @@ -0,0 +1,172 @@ + +#include +#include +#include + +#include "sxe_csum.h" +#include "sxe_ring.h" +#include "sxe_tx_proc.h" +#include "sxe_log.h" + +#ifndef HAVE_SKB_CSUM_SCTP_API +static inline bool sxe_is_sctp_ipv4(__be16 protocol, struct sk_buff *skb) +{ + bool ret = false; + + if ((protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) { + LOG_DEBUG("protocal:0x%d tx packet type is ipv4 sctp.\n", + protocol); + ret = true; + } + + return ret; +} + +static inline bool sxe_is_sctp_ipv6(__be16 protocol, struct sk_buff *skb) +{ + bool ret = false; + u32 offset = skb_checksum_start_offset(skb); + u32 hdr_offset = 0; + + ipv6_find_hdr(skb, &hdr_offset, IPPROTO_SCTP, NULL, NULL); + + if ((protocol == htons(ETH_P_IPV6)) && + (offset == hdr_offset)) { + LOG_DEBUG("protocal:0x%d offset:%d tx packet type is ipv6 sctp.\n", + protocol, offset); + ret = true; + } + + return ret; +} + +static inline bool sxe_prot_is_sctp(__be16 protocol, struct sk_buff *skb) +{ + bool ret = false; + + if (sxe_is_sctp_ipv4(protocol, skb) || + sxe_is_sctp_ipv6(protocol, skb)) { + ret = true; + } + + return ret; +} +#else +#define sxe_prot_is_sctp(protocol, skb) skb_csum_is_sctp(skb) +#endif + +void sxe_tx_csum_offload(struct sxe_ring *tx_ring, + struct sxe_tx_buffer *first, + struct sxe_tx_context_desc *ctxt_desc) +{ + u16 tucmd; + u16 ip_len; + u16 mac_len; + struct sk_buff *skb = first->skb; + struct sxe_adapter *adapter = netdev_priv(tx_ring->netdev); + + LOG_DEBUG_BDF("tx ring[%d] ip_summed:%d " + "csum_offset:%d csum_start:%d protocol:%d " + "netdev features:0x%llx\n", + tx_ring->idx, skb->ip_summed, + skb->csum_offset, skb->csum_start, + skb->protocol, tx_ring->netdev->features); + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + goto no_checksum; + } + + switch (skb->csum_offset) { + case SXE_TCP_CSUM_OFFSET: + tucmd = SXE_TX_CTXTD_TUCMD_L4T_TCP; + break; + case SXE_UDP_CSUM_OFFSET: + tucmd = SXE_TX_CTXTD_TUCMD_L4T_UDP; + break; + case SXE_SCTP_CSUM_OFFSET: + if (sxe_prot_is_sctp(first->protocol, skb)) { + tucmd = SXE_TX_CTXTD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto no_checksum; + } + + first->tx_features |= SXE_TX_FEATURE_CSUM; + ip_len = skb_checksum_start_offset(skb) - skb_network_offset(skb); + + mac_len = skb_network_offset(skb); + + sxe_ctxt_desc_tucmd_set(ctxt_desc, 
tucmd); + sxe_ctxt_desc_iplen_set(ctxt_desc, ip_len); + sxe_ctxt_desc_maclen_set(ctxt_desc, mac_len); + + LOG_DEBUG_BDF("tx ring[%d] L3 protocol:%d tucmd:0x%x " + "iplen:0x%x mac_len:0x%x, tx_features:0x%x\n", + tx_ring->idx, first->protocol, tucmd, + ip_len, mac_len, first->tx_features); + +no_checksum: + return; +} + +void sxe_rx_csum_verify(struct sxe_ring *ring, + union sxe_rx_data_desc *desc, + struct sk_buff *skb) +{ +#ifndef SXE_DRIVER_RELEASE + __le16 pkt_info = desc->wb.lower.lo_dword.hs_rss.pkt_info; + + LOG_DEBUG("rx ring[%d] csum verify ip_summed:%d " + "csum_offset:%d csum_start:%d pkt_info:0x%x " + "netdev features:0x%llx\n", + ring->idx, skb->ip_summed, + skb->csum_offset, skb->csum_start, + pkt_info, ring->netdev->features); +#endif + skb_checksum_none_assert(skb); + + if (!(ring->netdev->features & NETIF_F_RXCSUM)) { + LOG_WARN("rx ring[%d] no offload checksum verify.\n", ring->idx); + goto l_out; + } + + if (sxe_status_err_check(desc, SXE_RXD_STAT_IPCS) && + sxe_status_err_check(desc, SXE_RXDADV_ERR_IPE)) { + ring->rx_stats.csum_err++; + LOG_ERROR("rx ring [%d] ip checksum fail.csum_err:%llu\n", + ring->idx, ring->rx_stats.csum_err); + goto l_out; + } + + if (sxe_status_err_check(desc, SXE_RXD_STAT_LB)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + goto l_out; + } + + if (!sxe_status_err_check(desc, SXE_RXD_STAT_L4CS)) { + LOG_DEBUG("rx ring[%d] no need verify L4 checksum\n", + ring->idx); + goto l_out; + } + + if (sxe_status_err_check(desc, SXE_RXDADV_ERR_L4E)) { + ring->rx_stats.csum_err++; + + LOG_ERROR("rx ring[%d] L4 checksum verify error.csum_err:%llu\n", + ring->idx, ring->rx_stats.csum_err); + goto l_out; + } + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + LOG_DEBUG("rx ring[%d] ip_summed:%d sxe hw " + "verify checksum pass.\n", + ring->idx, skb->ip_summed); + +l_out: + return; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_csum.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_csum.h new file mode 100644 index 000000000000..769086e1c11b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_csum.h @@ -0,0 +1,36 @@ + +#ifndef __SXE_CSUM_H__ +#define __SXE_CSUM_H__ + +#include +#include +#include +#include + +#include "sxe_ipsec.h" +#include "sxe.h" + +#ifdef NOT_INCLUDE_SCTP_H +typedef struct sctphdr { + __be16 source; + __be16 dest; + __be32 vtag; + __le32 checksum; +} __packed sctp_sctphdr_t; +#else +#include +#endif + +#define SXE_TCP_CSUM_OFFSET (offsetof(struct tcphdr, check)) +#define SXE_UDP_CSUM_OFFSET (offsetof(struct udphdr, check)) +#define SXE_SCTP_CSUM_OFFSET (offsetof(struct sctphdr, checksum)) + +void sxe_tx_csum_offload(struct sxe_ring *tx_ring, + struct sxe_tx_buffer *first, + struct sxe_tx_context_desc *ctxt_desc); + +void sxe_rx_csum_verify(struct sxe_ring *ring, + union sxe_rx_data_desc *desc, + struct sk_buff *skb); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_dcb.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_dcb.c new file mode 100644 index 000000000000..dbc3ecebc56e --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_dcb.c @@ -0,0 +1,487 @@ + +#include "sxe.h" + +#ifdef SXE_DCB_CONFIGURE + +#define SXE_TC_BWG_PERCENT_PER_CHAN (12) + +void sxe_dcb_init(struct sxe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct sxe_dcb_cee_config *cee_cfg = &adapter->dcb_ctxt.cee_cfg; + + struct sxe_tc_config *tc; + u32 tc_index; + + netdev->dcbnl_ops = &sxe_dcbnl_ops; + + cee_cfg->num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; + cee_cfg->num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; + + for 
(tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) { + tc = &cee_cfg->tc_config[tc_index]; + tc->channel[DCB_PATH_TX].bwg_id = 0; + tc->channel[DCB_PATH_TX].bwg_percent = SXE_TC_BWG_PERCENT_PER_CHAN + (tc_index & 1); + tc->channel[DCB_PATH_RX].bwg_id = 0; + tc->channel[DCB_PATH_RX].bwg_percent = SXE_TC_BWG_PERCENT_PER_CHAN + (tc_index & 1); + tc->pfc_type = pfc_disabled; + } + + tc = &cee_cfg->tc_config[0]; + tc->channel[DCB_PATH_TX].up_to_tc_bitmap = 0xFF; + tc->channel[DCB_PATH_RX].up_to_tc_bitmap = 0xFF; + + cee_cfg->bwg_link_percent[DCB_PATH_TX][0] = SXE_PERCENT_100; + cee_cfg->bwg_link_percent[DCB_PATH_RX][0] = SXE_PERCENT_100; + cee_cfg->pfc_mode_enable = false; + adapter->dcb_ctxt.cee_cfg_bitmap = 0x00; + adapter->dcb_ctxt.dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + + memcpy(&adapter->dcb_ctxt.cee_temp_cfg, cee_cfg, + sizeof(adapter->dcb_ctxt.cee_temp_cfg)); + + return; +} + +static u32 sxe_dcb_min_credit_get(u32 max_frame) +{ + + return ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / + DCB_CREDIT_QUANTUM; + +} + +static u16 sxe_dcb_cee_tc_link_percent_get( + struct sxe_dcb_cee_config *cee_config, + u8 direction, u8 tc_index) +{ + u8 bw_percent; + u16 link_percentage; + struct sxe_tc_bw_alloc *tc_info; + + tc_info = &cee_config->tc_config[tc_index].channel[direction]; + link_percentage = + cee_config->bwg_link_percent[direction][tc_info->bwg_id]; + bw_percent = tc_info->bwg_percent; + + link_percentage = (link_percentage * bw_percent) / SXE_PERCENT_100; + + return link_percentage; +} + +static u32 sxe_dcb_cee_min_link_percent_get( + struct sxe_dcb_cee_config *cee_config, u8 direction) +{ + u8 tc_index; + u16 link_percentage; + u32 min_link_percent = SXE_PERCENT_100; + + for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) { + link_percentage = sxe_dcb_cee_tc_link_percent_get( + cee_config, direction, tc_index); + + if (link_percentage && link_percentage < min_link_percent) { + min_link_percent = link_percentage; + } + } + + return min_link_percent; +} + +s32 sxe_dcb_cee_tc_credits_calculate(struct sxe_hw *hw, + struct sxe_dcb_cee_config *cee_config, + u32 max_frame, u8 direction) +{ + s32 ret = 0; + struct sxe_adapter *adapter = hw->adapter; + struct sxe_tc_bw_alloc *tc_info; + u32 min_credit; + u32 total_credit; + u32 min_link_percent; + u32 credit_refill; + u32 credit_max; + u16 link_percentage; + u8 tc_index; + + if (!cee_config) { + ret = -DCB_ERR_CONFIG; + LOG_ERROR_BDF("config info is NULL\n"); + goto l_ret; + } + + LOG_DEBUG_BDF("cee_config[%p] input max_frame[%u] direction[%s]\n", + cee_config, max_frame, direction ? "RX" : "TX"); + + min_credit = sxe_dcb_min_credit_get(max_frame); + LOG_DEBUG_BDF("cee_config[%p] max_frame[%u] got min_credit[%u]\n", + cee_config, max_frame, min_credit); + + min_link_percent = sxe_dcb_cee_min_link_percent_get(cee_config, direction); + LOG_DEBUG_BDF("cee_config[%p] direction[%s] got min_link_percent[%u]\n", + cee_config, direction ? 
"RX" : "TX", min_link_percent); + + total_credit = (min_credit / min_link_percent) + 1; + LOG_DEBUG_BDF("cee_config[%p] total_credit=%u\n", cee_config, total_credit); + + for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) { + tc_info = &cee_config->tc_config[tc_index].channel[direction]; + + link_percentage = sxe_dcb_cee_tc_link_percent_get( + cee_config, direction, tc_index); + LOG_DEBUG_BDF("tc[%u] bwg_percent=%u, link_percentage=%u\n", + tc_index, tc_info->bwg_percent, link_percentage); + + if (tc_info->bwg_percent > 0 && link_percentage == 0) { + link_percentage = 1; + } + + tc_info->link_percent = (u8)link_percentage; + + credit_refill = min(link_percentage * total_credit, + (u32)MAX_CREDIT_REFILL); + + if (credit_refill < min_credit) { + credit_refill = min_credit; + } + + tc_info->data_credits_refill = (u16)credit_refill; + LOG_DEBUG_BDF("tc[%u] credit_refill=%u\n", + tc_index, credit_refill); + + credit_max = (link_percentage * MAX_CREDIT) / SXE_PERCENT_100; + + if (credit_max < min_credit) { + credit_max = min_credit; + } + LOG_DEBUG_BDF("tc[%u] credit_max=%u\n", + tc_index, credit_max); + + if (direction == DCB_PATH_TX) { + cee_config->tc_config[tc_index].desc_credits_max = + (u16)credit_max; + } + + tc_info->data_credits_max = (u16)credit_max; + } + +l_ret: + return ret; +} + +void sxe_dcb_cee_pfc_parse(struct sxe_dcb_cee_config *cfg, u8 *pfc_en) +{ + u32 tc; + struct sxe_tc_config *tc_config = &cfg->tc_config[0]; + + for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { + if (tc_config[tc].pfc_type != pfc_disabled) { + *pfc_en |= BIT(tc); + } + } + LOG_DEBUG("cfg[%p] pfc_en[0x%x]\n", cfg, *pfc_en); + + return; +} + +void sxe_dcb_cee_refill_parse(struct sxe_dcb_cee_config *cfg, + u8 direction, u16 *refill) +{ + u32 tc; + struct sxe_tc_config *tc_config = &cfg->tc_config[0]; + + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { + refill[tc] = tc_config[tc]. 
\ + channel[direction].data_credits_refill; + LOG_DEBUG("tc[%u] --- refill[%u]\n", tc, refill[tc]); + } + + return; +} + +void sxe_dcb_cee_max_credits_parse(struct sxe_dcb_cee_config *cfg, + u16 *max_credits) +{ + u32 tc; + struct sxe_tc_config *tc_config = &cfg->tc_config[0]; + + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { + max_credits[tc] = tc_config[tc].desc_credits_max; + LOG_DEBUG("tc[%u] --- max_credits[%u]\n", tc, max_credits[tc]); + } + + return; +} + +void sxe_dcb_cee_bwgid_parse(struct sxe_dcb_cee_config *cfg, + u8 direction, u8 *bwgid) +{ + u32 tc; + struct sxe_tc_config *tc_config = &cfg->tc_config[0]; + + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { + bwgid[tc] = tc_config[tc].channel[direction].bwg_id; + LOG_DEBUG("tc[%u] --- bwgid[%u]\n", tc, bwgid[tc]); + } + + return; +} + +void sxe_dcb_cee_prio_parse(struct sxe_dcb_cee_config *cfg, + u8 direction, u8 *ptype) +{ + u32 tc; + struct sxe_tc_config *tc_config = &cfg->tc_config[0]; + + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { + ptype[tc] = tc_config[tc].channel[direction].prio_type; + LOG_DEBUG("tc[%u] --- ptype[%u]\n", tc, ptype[tc]); + } + + return; +} + +u8 sxe_dcb_cee_get_tc_from_up(struct sxe_dcb_cee_config *cfg, + u8 direction, u8 up) +{ + struct sxe_tc_config *tc_config = &cfg->tc_config[0]; + u8 prio_mask = BIT(up); + u8 tc = cfg->num_tcs.pg_tcs; + + if (!tc) { + goto l_ret; + } + + for (tc--; tc; tc--) { + if (prio_mask & tc_config[tc].channel[direction].up_to_tc_bitmap) { + break; + } + } + +l_ret: + LOG_DEBUG("up[%u] to tc[%u]\n", up, tc); + return tc; +} + +void sxe_dcb_cee_up2tc_map_parse(struct sxe_dcb_cee_config *cfg, + u8 direction, u8 *map) +{ + u8 up; + + for (up = 0; up < MAX_USER_PRIORITY; up++) { + map[up] = sxe_dcb_cee_get_tc_from_up(cfg, direction, up); + LOG_DEBUG("up[%u] --- up2tc_map[%u]\n", up, map[up]); + } + + return; +} + +static void sxe_dcb_hw_cee_bw_alloc_configure(struct sxe_hw *hw, + u16 *refill, u16 *max, + u8 *bwg_id, u8 *prio_type, + u8 *prio_tc) +{ + hw->dma.ops->dcb_rx_bw_alloc_configure(hw, refill, max, bwg_id, + prio_type, prio_tc, MAX_USER_PRIORITY); + hw->dma.ops->dcb_tx_desc_bw_alloc_configure(hw, refill, max, + bwg_id, prio_type); + hw->dma.ops->dcb_tx_data_bw_alloc_configure(hw, refill, max, + bwg_id, prio_type, + prio_tc, MAX_USER_PRIORITY); + + return; +} + +static void sxe_dcb_hw_cee_non_bw_alloc_configure(struct sxe_hw *hw) +{ + hw->dma.ops->dcb_tc_stats_configure(hw); + + return; +} + +static void sxe_dcb_hw_cee_configure(struct sxe_hw *hw, + struct sxe_dcb_cee_config *dcb_config) +{ + u8 ptype[MAX_TRAFFIC_CLASS]; + u8 bwgid[MAX_TRAFFIC_CLASS]; + u8 prio_tc[MAX_TRAFFIC_CLASS]; + u16 refill[MAX_TRAFFIC_CLASS]; + u16 max[MAX_TRAFFIC_CLASS]; + + sxe_dcb_cee_refill_parse(dcb_config, DCB_PATH_TX, refill); + sxe_dcb_cee_max_credits_parse(dcb_config, max); + sxe_dcb_cee_bwgid_parse(dcb_config, DCB_PATH_TX, bwgid); + sxe_dcb_cee_prio_parse(dcb_config, DCB_PATH_TX, ptype); + sxe_dcb_cee_up2tc_map_parse(dcb_config, DCB_PATH_TX, prio_tc); + + sxe_dcb_hw_cee_bw_alloc_configure(hw, refill, max, + bwgid, ptype, prio_tc); + + sxe_dcb_hw_cee_non_bw_alloc_configure(hw); + + return; +} + +static void sxe_dcb_ieee_tc_credits_calculate(u8 *bw, u16 *refill, + u16 *max, u32 max_frame) +{ + u16 min_percent = 100; + u32 min_credit, total_credits; + u8 tc_index; + + min_credit = sxe_dcb_min_credit_get(max_frame); + LOG_DEBUG("min_credit=%u, max_frame=%u\n", min_credit, max_frame); + + for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) { + if (bw[tc_index] < min_percent && bw[tc_index]) 
{ + min_percent = bw[tc_index]; + } + } + LOG_DEBUG("min_percent=%u\n", min_percent); + + total_credits = (min_credit / min_percent) + 1; + LOG_DEBUG("total_credits=%u\n", total_credits); + + for (tc_index = 0; tc_index < MAX_TRAFFIC_CLASS; tc_index++) { + u32 val = min(bw[tc_index] * total_credits, (u32)MAX_CREDIT_REFILL); + + if (val < min_credit) { + val = min_credit; + } + + refill[tc_index] = val; + LOG_DEBUG("tc[%u] credits_refill=%u\n", tc_index, refill[tc_index]); + + max[tc_index] = bw[tc_index] ? (bw[tc_index] * MAX_CREDIT)/SXE_PERCENT_100 : min_credit; + LOG_DEBUG("tc[%u] max_credits=%u\n", tc_index, max[tc_index]); + } + + return; +} + +void sxe_dcb_hw_ets_configure(struct sxe_hw *hw, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *prio_type, u8 *prio_tc) +{ + hw->dma.ops->dcb_rx_bw_alloc_configure(hw, refill, max, + bwg_id, prio_type, prio_tc, + MAX_USER_PRIORITY); + + hw->dma.ops->dcb_tx_desc_bw_alloc_configure(hw, refill, max, + bwg_id, prio_type); + hw->dma.ops->dcb_tx_data_bw_alloc_configure(hw, refill, max, bwg_id, + prio_type, prio_tc, + MAX_USER_PRIORITY); + return; +} + +s32 sxe_dcb_hw_ieee_ets_configure(struct sxe_hw *hw, + struct ieee_ets *ets, u32 max_frame) +{ + u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; + u8 prio_type[IEEE_8021QAZ_MAX_TCS]; + u8 tc_index; + s32 ret = 0; + struct sxe_adapter *adapter = hw->adapter; + + u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; + + for (tc_index = 0; tc_index < IEEE_8021QAZ_MAX_TCS; tc_index++) { + switch (ets->tc_tsa[tc_index]) { + case IEEE_8021QAZ_TSA_STRICT: + prio_type[tc_index] = 2; + break; + case IEEE_8021QAZ_TSA_ETS: + prio_type[tc_index] = 0; + break; + default: + LOG_ERROR_BDF("unsupport tsa[%u]=%u\n", + tc_index, ets->tc_tsa[tc_index]); + ret = -EINVAL; + goto l_ret; + } + LOG_DEBUG_BDF("tc[%u] prio_type=%u\n",tc_index, prio_type[tc_index]); + } + + sxe_dcb_ieee_tc_credits_calculate(ets->tc_tx_bw, refill, max, max_frame); + + sxe_dcb_hw_ets_configure(hw, refill, max, + bwg_id, prio_type, ets->prio_tc); + +l_ret: + return ret; +} + +void sxe_dcb_hw_pfc_configure(struct sxe_hw *hw, u8 pfc_en, u8 *prio_tc) +{ + hw->dma.ops->dcb_pfc_configure(hw, pfc_en, prio_tc, MAX_USER_PRIORITY); + return; +} + +void sxe_dcb_pfc_configure(struct sxe_adapter *adapter) +{ + u8 pfc_en = 0; + u8 prio_tc[MAX_TRAFFIC_CLASS]; + + struct sxe_dcb_cee_config *cee_cfg = &adapter->dcb_ctxt.cee_cfg; + + if (adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE) { + LOG_DEBUG_BDF("pfc in cee mode\n"); + sxe_dcb_cee_pfc_parse(cee_cfg, &pfc_en); + sxe_dcb_cee_up2tc_map_parse(cee_cfg, DCB_PATH_TX, prio_tc); + } else if (adapter->dcb_ctxt.ieee_ets && adapter->dcb_ctxt.ieee_pfc) { + LOG_DEBUG_BDF("pfc in ieee mode\n"); + pfc_en = adapter->dcb_ctxt.ieee_pfc->pfc_en; + memcpy(prio_tc, adapter->dcb_ctxt.ieee_ets->prio_tc, + sizeof(prio_tc[0]) * MAX_TRAFFIC_CLASS); + } + + if (pfc_en) { + sxe_dcb_hw_pfc_configure(&adapter->hw, pfc_en, prio_tc); + } + + return; +} + +void sxe_dcb_configure(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + u32 max_frame = adapter->netdev->mtu + SXE_ETH_DEAD_LOAD; + u16 rss = sxe_rss_num_get(adapter); + + if (!(adapter->cap & SXE_DCB_ENABLE)) { + return; + } + + if (adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE) { + LOG_DEBUG_BDF("dcb in cee mode\n"); + sxe_dcb_cee_tc_credits_calculate(hw, &adapter->dcb_ctxt.cee_cfg, + max_frame, DCB_PATH_TX); + sxe_dcb_cee_tc_credits_calculate(hw, &adapter->dcb_ctxt.cee_cfg, + max_frame, DCB_PATH_RX); + sxe_dcb_hw_cee_configure(hw, 
&adapter->dcb_ctxt.cee_cfg); + } else if (adapter->dcb_ctxt.ieee_ets && adapter->dcb_ctxt.ieee_pfc) { + LOG_DEBUG_BDF("dcb in ieee mode\n"); + sxe_dcb_hw_ieee_ets_configure(&adapter->hw, + adapter->dcb_ctxt.ieee_ets, + max_frame); + } + + hw->dbu.ops->dcb_tc_rss_configure(hw, rss); + + return; +} + +void sxe_dcb_exit(struct sxe_adapter* adapter) +{ + if (adapter->dcb_ctxt.ieee_pfc) { + kfree(adapter->dcb_ctxt.ieee_pfc); + } + + if (adapter->dcb_ctxt.ieee_ets) { + kfree(adapter->dcb_ctxt.ieee_ets); + } + + return; +} + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_dcb.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_dcb.h new file mode 100644 index 000000000000..27ab6a67eed9 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_dcb.h @@ -0,0 +1,125 @@ +#ifndef __SXE_DCB_H__ +#define __SXE_DCB_H__ + +#ifdef SXE_DCB_CONFIGURE +#include +#include "sxe_hw.h" + +extern const struct dcbnl_rtnl_ops sxe_dcbnl_ops; +struct sxe_adapter; +#endif + +#define SXE_MAX_PACKET_BUFFERS 8 +#define MAX_USER_PRIORITY 8 +#define MAX_BW_GROUP 8 +#define BW_PERCENT 100 + +enum { + DCB_PATH_TX = 0, + DCB_PATH_RX = 1, + DCB_PATH_NUM = DCB_PATH_RX + 1, +}; + +#define DCB_ERR_CONFIG 1 + +#define DCB_ERR_BW_GROUP -3 +#define DCB_ERR_TC_BW -4 +#define DCB_ERR_LS_GS -5 +#define DCB_ERR_LS_BW_NONZERO -6 +#define DCB_ERR_LS_BWG_NONZERO -7 +#define DCB_ERR_TC_BW_ZERO -8 + +#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF + +#define SXE_DCB_MMW_SIZE_DEFAULT 0x04 + +#define SXE_PERCENT_100 100 + +#define SXE_DCB_PG_SUPPORT 0x00000001 +#define SXE_DCB_PFC_SUPPORT 0x00000002 +#define SXE_DCB_BCN_SUPPORT 0x00000004 +#define SXE_DCB_UP2TC_SUPPORT 0x00000008 +#define SXE_DCB_GSP_SUPPORT 0x00000010 +#define SXE_DCB_8_TC_SUPPORT 0x80 + +#define DCB_CREDIT_QUANTUM 64 +#define MAX_CREDIT_REFILL 511 +#define DCB_MAX_TSO_SIZE (32*1024) +#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) +#define MAX_CREDIT 4095 + +struct sxe_tc_bw_alloc { + u8 bwg_id; + u8 bwg_percent; + u8 link_percent; + u8 up_to_tc_bitmap; + u16 data_credits_refill; + u16 data_credits_max; + enum sxe_strict_prio_type prio_type; +}; + +enum sxe_dcb_pfc_type { + pfc_disabled = 0, + pfc_enabled_full, + pfc_enabled_tx, + pfc_enabled_rx +}; + +struct sxe_tc_config { + struct sxe_tc_bw_alloc channel[DCB_PATH_NUM]; + enum sxe_dcb_pfc_type pfc_type; + + u16 desc_credits_max; +}; + +struct sxe_dcb_num_tcs { + u8 pg_tcs; + u8 pfc_tcs; +}; + +struct sxe_dcb_cee_config { + struct sxe_dcb_num_tcs num_tcs; + struct sxe_tc_config tc_config[MAX_TRAFFIC_CLASS]; + u8 bwg_link_percent[DCB_PATH_NUM][MAX_BW_GROUP]; + bool pfc_mode_enable; +}; + +#ifdef SXE_DCB_CONFIGURE + +void sxe_dcb_init(struct sxe_adapter *adapter); +void sxe_dcb_configure(struct sxe_adapter *adapter); +u8 sxe_dcb_cee_get_tc_from_up(struct sxe_dcb_cee_config *cfg, + u8 direction, u8 up); +s32 sxe_dcb_cee_tc_credits_calculate(struct sxe_hw *hw, + struct sxe_dcb_cee_config *dcb_config, + u32 max_frame, u8 direction); + +void sxe_dcb_cee_refill_parse(struct sxe_dcb_cee_config *cfg, + u8 direction, u16 *refill); +void sxe_dcb_cee_max_credits_parse(struct sxe_dcb_cee_config *cfg, + u16 *max_credits); +void sxe_dcb_cee_bwgid_parse(struct sxe_dcb_cee_config *cfg, + u8 direction, u8 *bwgid); +void sxe_dcb_cee_prio_parse(struct sxe_dcb_cee_config *cfg, + u8 direction, u8 *ptype); +void sxe_dcb_cee_up2tc_map_parse(struct sxe_dcb_cee_config *cfg, + u8 direction, u8 *map); +void sxe_dcb_cee_pfc_parse(struct sxe_dcb_cee_config *cfg, u8 *pfc_en); +void sxe_dcb_hw_pfc_configure(struct sxe_hw *hw, u8 pfc_en, 
u8 *prio_tc); +void sxe_dcb_hw_ets_configure(struct sxe_hw *hw, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *prio_type, u8 *prio_tc); +s32 sxe_dcb_hw_ieee_ets_configure(struct sxe_hw *hw, + struct ieee_ets *ets, u32 max_frame); +void sxe_dcb_pfc_configure(struct sxe_adapter *adapter); + +void sxe_dcb_exit(struct sxe_adapter* adapter); + +s32 sxe_dcb_tc_validate(struct sxe_adapter *adapter, u8 tc); + +s32 sxe_dcb_tc_setup(struct sxe_adapter *adapter, u8 tc); + +#endif + +void sxe_rx_drop_mode_set(struct sxe_adapter *adapter); +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_dcb_nl.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_dcb_nl.c new file mode 100644 index 000000000000..867173f28000 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_dcb_nl.c @@ -0,0 +1,1026 @@ +#include "sxe.h" + +#ifdef SXE_DCB_CONFIGURE +#include +#include "sxe_phy.h" +#include "sxe_dcb.h" +#include "sxe_sriov.h" +#include "sxe_netdev.h" + +#define BIT_PFC 0x02 +#define BIT_PG_RX 0x04 +#define BIT_PG_TX 0x08 +#define BIT_APP_UPCHG 0x10 + +#define DCB_HW_CHG_RST 0 +#define DCB_NO_HW_CHG 1 +#define DCB_HW_CHG 2 + +s32 sxe_dcb_tc_validate(struct sxe_adapter *adapter, u8 tc) +{ + s32 ret = 0; + + if (tc > adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs) { + LOG_ERROR_BDF(" tc num [%u] is invalid, max tc num=%u\n", + tc, adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs); + ret = -EINVAL; + } + + return ret; +} + +static void sxe_prio_tc_map_set(struct sxe_adapter *adapter) +{ + u8 prio; + u8 tc = 0; + struct net_device *dev = adapter->netdev; + struct sxe_dcb_cee_config *cee_cfg = &adapter->dcb_ctxt.cee_cfg; + struct ieee_ets *ets = adapter->dcb_ctxt.ieee_ets; + + for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { + if (adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE) { + tc = sxe_dcb_cee_get_tc_from_up(cee_cfg, + DCB_PATH_TX, prio); + } else if (ets) { + tc = ets->prio_tc[prio]; + } + + netdev_set_prio_tc_map(dev, prio, tc); + } + + return ; +} + +s32 sxe_dcb_tc_setup(struct sxe_adapter *adapter, u8 tc) +{ + s32 ret = 0; + struct net_device *netdev = adapter->netdev; + struct sxe_hw *hw = &adapter->hw; + + LOG_DEBUG_BDF("current dcb state=%x, tc_num=%u, cfg tc_num=%u\n", + !!(adapter->cap & SXE_DCB_ENABLE), sxe_dcb_tc_get(adapter), tc); + + if (tc) { + if (adapter->xdp_prog) { + LOG_MSG_WARN(probe, "dcb is not supported with xdp\n"); + + sxe_ring_irq_init(adapter); + if (netif_running(netdev)) { + sxe_open(netdev); + } + + ret = -EINVAL; + goto l_ret; + } + + netdev_set_num_tc(netdev, tc); + sxe_prio_tc_map_set(adapter); + + sxe_dcb_tc_set(adapter, tc); + adapter->cap |= SXE_DCB_ENABLE; + LOG_DEBUG_BDF("dcb enble, cfg tc_num=%u\n", tc); + } else { + netdev_reset_tc(netdev); + + adapter->cap &= ~SXE_DCB_ENABLE; + sxe_dcb_tc_set(adapter, tc); + + adapter->dcb_ctxt.cee_temp_cfg.pfc_mode_enable = false; + adapter->dcb_ctxt.cee_cfg.pfc_mode_enable = false; + LOG_DEBUG_BDF("dcb disable, cfg tc_num=%u\n", tc); + } + + hw->dma.ops->dcb_rx_up_tc_map_set(hw, tc); + +l_ret: + return ret; +} + +static u8 sxe_dcbnl_state_get(struct net_device *netdev) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + LOG_DEBUG_BDF("dcb current state=%u\n", !!(adapter->cap & SXE_DCB_ENABLE)); + + return (u8)(!!(adapter->cap & SXE_DCB_ENABLE)); +} + +static u8 sxe_dcbnl_state_set(struct net_device *netdev, u8 state) +{ + s32 ret = 1; + struct sxe_adapter *adapter = netdev_priv(netdev); + + if (!state == !(adapter->cap & SXE_DCB_ENABLE)) { + LOG_INFO_BDF("dcb current state=%x, set state=%x, no change\n", + !!(adapter->cap & 
SXE_DCB_ENABLE), state); + ret = 0; + goto l_end; + } + + LOG_DEBUG_BDF("dcb current state=%u, set state=%u, setup tc\n", + !!(adapter->cap & SXE_DCB_ENABLE), state); + + ret = !!sxe_ring_reassign(adapter, state ? \ + adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs : 0); + +l_end: + return ret; +} + +static void sxe_dcbnl_perm_addr_get(struct net_device *netdev, + u8 *perm_addr) +{ + u32 i; + struct sxe_adapter *adapter = netdev_priv(netdev); + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + + for (i = 0; i < netdev->addr_len; i++) { + perm_addr[i] = adapter->mac_filter_ctxt.def_mac_addr[i]; + } + LOG_DEBUG_BDF("perm_addr=%pM\n", perm_addr); + + return; +} + +static void sxe_dcbnl_tx_pg_tc_cfg_set(struct net_device *netdev, int tc, + u8 prio_type, u8 bwg_id, u8 bwg_pct, + u8 up_map) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_dcb_context *dcb_ctxt = &adapter->dcb_ctxt; + + LOG_DEBUG_BDF("tx pg tc config, tc=%d, prio=%u, bwg_id=%u, bwg_pct=%u, up_map=%u\n", + tc, prio_type, bwg_id, bwg_pct, up_map); + + if (prio_type != DCB_ATTR_VALUE_UNDEFINED) { + dcb_ctxt->cee_temp_cfg.tc_config[tc].\ + channel[DCB_PATH_TX].prio_type = prio_type; + } + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) { + dcb_ctxt->cee_temp_cfg.tc_config[tc].\ + channel[DCB_PATH_TX].bwg_id = bwg_id; + } + if (bwg_pct != DCB_ATTR_VALUE_UNDEFINED) { + dcb_ctxt->cee_temp_cfg.tc_config[tc].\ + channel[DCB_PATH_TX].bwg_percent = bwg_pct; + } + if (up_map != DCB_ATTR_VALUE_UNDEFINED) { + dcb_ctxt->cee_temp_cfg.tc_config[tc].\ + channel[DCB_PATH_TX].up_to_tc_bitmap = up_map; + } + + return; +} + +static void sxe_dcbnl_tx_pg_bwg_cfg_set(struct net_device *netdev, + int bwg_id, u8 bwg_pct) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + LOG_DEBUG_BDF("tx bw config, bwg_id=%d, bwg_pct=%u\n", bwg_id, bwg_pct); + + adapter->dcb_ctxt.cee_temp_cfg.\ + bwg_link_percent[DCB_PATH_TX][bwg_id] = bwg_pct; + + return; +} + +static void sxe_dcbnl_rx_pg_tc_cfg_set(struct net_device *netdev, int tc, + u8 prio_type, u8 bwg_id, u8 bwg_pct, + u8 up_map) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_dcb_context *dcb_ctxt = &adapter->dcb_ctxt; + + LOG_DEBUG_BDF("rx pg tc config, tc=%d, prio=%u, bwg_id=%u, bwg_pct=%u, up_map=%u\n", + tc, prio_type, bwg_id, bwg_pct, up_map); + + if (prio_type != DCB_ATTR_VALUE_UNDEFINED) { + dcb_ctxt->cee_temp_cfg.tc_config[tc].\ + channel[DCB_PATH_RX].prio_type = prio_type; + } + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) { + dcb_ctxt->cee_temp_cfg.tc_config[tc].\ + channel[DCB_PATH_RX].bwg_id = bwg_id; + } + if (bwg_pct != DCB_ATTR_VALUE_UNDEFINED) { + dcb_ctxt->cee_temp_cfg.tc_config[tc].\ + channel[DCB_PATH_RX].bwg_percent = bwg_pct; + } + if (up_map != DCB_ATTR_VALUE_UNDEFINED) { + dcb_ctxt->cee_temp_cfg.tc_config[tc].\ + channel[DCB_PATH_RX].up_to_tc_bitmap = up_map; + } + + return; +} + +static void sxe_dcbnl_rx_pg_bwg_cfg_set(struct net_device *netdev, + int bwg_id, u8 bwg_pct) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + LOG_DEBUG_BDF("rx bw config, bwg_id=%d, bwg_pct=%u\n", bwg_id, bwg_pct); + + adapter->dcb_ctxt.cee_temp_cfg.\ + bwg_link_percent[DCB_PATH_RX][bwg_id] = bwg_pct; + + return; +} + +static void sxe_dcbnl_tx_pg_tc_cfg_get(struct net_device *netdev, int tc, + u8 *prio, u8 *bwg_id, u8 *bwg_pct, + u8 *up_map) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_dcb_context *dcb_ctxt = &adapter->dcb_ctxt; + + *prio = dcb_ctxt->cee_cfg.tc_config[tc].\ + channel[DCB_PATH_TX].prio_type; + *bwg_id = dcb_ctxt->cee_cfg.tc_config[tc].\ + 
channel[DCB_PATH_TX].bwg_id; + *bwg_pct = dcb_ctxt->cee_cfg.tc_config[tc].\ + channel[DCB_PATH_TX].bwg_percent; + *up_map = dcb_ctxt->cee_cfg.tc_config[tc].\ + channel[DCB_PATH_TX].up_to_tc_bitmap; + + LOG_DEBUG_BDF("get tx pg cfg: tc=%d, prio=%u, bwg_id=%u, bwg_pct=%u, up_map=%u\n", + tc, *prio, *bwg_id, *bwg_pct, *up_map); + + return; +} + +static void sxe_dcbnl_tx_pg_bwg_cfg_get(struct net_device *netdev, int bwg_id, + u8 *bwg_pct) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + *bwg_pct = adapter->dcb_ctxt.cee_cfg.\ + bwg_link_percent[DCB_PATH_TX][bwg_id]; + + LOG_DEBUG_BDF("get tx bwg cfg: bwg_id=%u, bwg_pct=%d\n", bwg_id, *bwg_pct); + + return; +} + +static void sxe_dcbnl_rx_pg_tc_cfg_get(struct net_device *netdev, int tc, + u8 *prio, u8 *bwg_id, u8 *bwg_pct, + u8 *up_map) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_dcb_context *dcb_ctxt = &adapter->dcb_ctxt; + + LOG_DEBUG_BDF("get rx pg cfg: tc=%d, prio=%u, bwg_id=%u, bwg_pct=%u, up_map=%u\n", + tc, *prio, *bwg_id, *bwg_pct, *up_map); + + *prio = dcb_ctxt->cee_cfg.tc_config[tc].\ + channel[DCB_PATH_RX].prio_type; + *bwg_id = dcb_ctxt->cee_cfg.tc_config[tc].\ + channel[DCB_PATH_RX].bwg_id; + *bwg_pct = dcb_ctxt->cee_cfg.tc_config[tc].\ + channel[DCB_PATH_RX].bwg_percent; + *up_map = dcb_ctxt->cee_cfg.tc_config[tc].\ + channel[DCB_PATH_RX].up_to_tc_bitmap; + + return; +} + +static void sxe_dcbnl_rx_pg_bwg_cfg_get(struct net_device *netdev, int bwg_id, + u8 *bwg_pct) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + *bwg_pct = adapter->dcb_ctxt.cee_cfg.bwg_link_percent[DCB_PATH_RX][bwg_id]; + + LOG_DEBUG_BDF("get rx bwg cfg: bwg_id=%d, bwg_pct=%u\n", bwg_id, *bwg_pct); + + return; +} + +static void sxe_dcbnl_pfc_cfg_set(struct net_device *netdev, int tc, + u8 setting) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + adapter->dcb_ctxt.cee_temp_cfg.tc_config[tc].pfc_type = setting; + if (adapter->dcb_ctxt.cee_temp_cfg.tc_config[tc].pfc_type != + adapter->dcb_ctxt.cee_cfg.tc_config[tc].pfc_type) { + adapter->dcb_ctxt.cee_temp_cfg.pfc_mode_enable = true; + } + + LOG_DEBUG_BDF("set pfc: tc=%d, setting=%u\n", tc, setting); + + return; +} + +static void sxe_dcbnl_pfc_cfg_get(struct net_device *netdev, int tc, + u8 *setting) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + *setting = adapter->dcb_ctxt.cee_cfg.tc_config[tc].pfc_type; + + LOG_DEBUG_BDF("get pfc: priority=%d, setting=%u\n", tc, *setting); + + return; +} + +static s32 sxe_dcb_cfg_copy(struct sxe_adapter *adapter, int tc_max) +{ + u32 i; + u32 changes = 0; + u32 tx = DCB_PATH_TX; + u32 rx = DCB_PATH_RX; + struct sxe_tc_config *src; + struct sxe_tc_config *dst; + struct sxe_dcb_cee_config *scfg = &adapter->dcb_ctxt.cee_temp_cfg; + struct sxe_dcb_cee_config *dcfg = &adapter->dcb_ctxt.cee_cfg; + + for (i = 0; i < tc_max; i++) { + src = &scfg->tc_config[i]; + dst = &dcfg->tc_config[i]; + + if (dst->channel[tx].prio_type != + src->channel[tx].prio_type) { + dst->channel[tx].prio_type = + src->channel[tx].prio_type; + changes |= BIT_PG_TX; + } + + if (dst->channel[tx].bwg_id != + src->channel[tx].bwg_id) { + dst->channel[tx].bwg_id = + src->channel[tx].bwg_id; + changes |= BIT_PG_TX; + } + + if (dst->channel[tx].bwg_percent != + src->channel[tx].bwg_percent) { + dst->channel[tx].bwg_percent = + src->channel[tx].bwg_percent; + changes |= BIT_PG_TX; + } + + if (dst->channel[tx].up_to_tc_bitmap != + src->channel[tx].up_to_tc_bitmap) { + dst->channel[tx].up_to_tc_bitmap = + src->channel[tx].up_to_tc_bitmap; + changes |= 
(BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG); + } + + if (dst->channel[rx].prio_type != + src->channel[rx].prio_type) { + dst->channel[rx].prio_type = + src->channel[rx].prio_type; + changes |= BIT_PG_RX; + } + + if (dst->channel[rx].bwg_id != + src->channel[rx].bwg_id) { + dst->channel[rx].bwg_id = + src->channel[rx].bwg_id; + changes |= BIT_PG_RX; + } + + if (dst->channel[rx].bwg_percent != + src->channel[rx].bwg_percent) { + dst->channel[rx].bwg_percent = + src->channel[rx].bwg_percent; + changes |= BIT_PG_RX; + } + + if (dst->channel[rx].up_to_tc_bitmap != + src->channel[rx].up_to_tc_bitmap) { + dst->channel[rx].up_to_tc_bitmap = + src->channel[rx].up_to_tc_bitmap; + changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG); + } + } + + for (i = 0; i < SXE_DCB_TC_MAX; i++) { + if (dcfg->bwg_link_percent[tx][i] != + scfg->bwg_link_percent[tx][i]) { + dcfg->bwg_link_percent[tx][i] = + scfg->bwg_link_percent[tx][i]; + changes |= BIT_PG_TX; + } + if (dcfg->bwg_link_percent[rx][i] != + scfg->bwg_link_percent[rx][i]) { + dcfg->bwg_link_percent[rx][i] = + scfg->bwg_link_percent[rx][i]; + changes |= BIT_PG_RX; + } + } + + for (i = 0; i < SXE_DCB_TC_MAX; i++) { + if (dcfg->tc_config[i].pfc_type != scfg->tc_config[i].pfc_type) { + dcfg->tc_config[i].pfc_type = scfg->tc_config[i].pfc_type; + changes |= BIT_PFC; + } + } + + if (dcfg->pfc_mode_enable != scfg->pfc_mode_enable) { + dcfg->pfc_mode_enable = scfg->pfc_mode_enable; + changes |= BIT_PFC; + } + + LOG_DEBUG_BDF("cee cfg cpy, change cfg=%x\n", changes); + + return changes; +} + +static u8 sxe_dcbnl_cee_configure(struct net_device *netdev) +{ + u32 i; + u8 pfc_en; + u32 max_frame; + u8 ret = DCB_NO_HW_CHG; + u8 prio_tc[MAX_USER_PRIORITY]; + u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; + u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + struct sxe_dcb_cee_config *dcb_cfg = &adapter->dcb_ctxt.cee_cfg; + + LOG_DEBUG_BDF("dcbnl cfg setall\n"); + + if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) { + LOG_DEBUG_BDF("not cee mode, settings are not supported\n"); + ret = DCB_NO_HW_CHG; + goto l_end; + } + + adapter->dcb_ctxt.cee_cfg_bitmap |= + sxe_dcb_cfg_copy(adapter, MAX_TRAFFIC_CLASS); + if (!adapter->dcb_ctxt.cee_cfg_bitmap) { + LOG_DEBUG_BDF("cfg not change\n"); + ret = DCB_NO_HW_CHG; + goto l_end; + } + + if (adapter->dcb_ctxt.cee_cfg_bitmap & (BIT_PG_TX|BIT_PG_RX)) { + max_frame = adapter->netdev->mtu + SXE_ETH_DEAD_LOAD; + + sxe_dcb_cee_tc_credits_calculate(hw, dcb_cfg, max_frame, + DCB_PATH_TX); + sxe_dcb_cee_tc_credits_calculate(hw, dcb_cfg, max_frame, + DCB_PATH_RX); + + sxe_dcb_cee_refill_parse(dcb_cfg, DCB_PATH_TX, refill); + sxe_dcb_cee_max_credits_parse(dcb_cfg, max); + sxe_dcb_cee_bwgid_parse(dcb_cfg, DCB_PATH_TX, bwg_id); + sxe_dcb_cee_prio_parse(dcb_cfg, DCB_PATH_TX, prio_type); + sxe_dcb_cee_up2tc_map_parse(dcb_cfg, DCB_PATH_TX, prio_tc); + + sxe_dcb_hw_ets_configure(hw, refill, max, bwg_id, + prio_type, prio_tc); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + netdev_set_prio_tc_map(netdev, i, prio_tc[i]); + } + + ret = DCB_HW_CHG_RST; + } + + if (adapter->dcb_ctxt.cee_cfg_bitmap & BIT_PFC) { + if (dcb_cfg->pfc_mode_enable) { + sxe_dcb_cee_up2tc_map_parse(dcb_cfg, DCB_PATH_TX, prio_tc); + sxe_dcb_cee_pfc_parse(dcb_cfg, &pfc_en); + sxe_dcb_hw_pfc_configure(hw, pfc_en, prio_tc); + } else { + sxe_fc_enable(adapter); + } + + sxe_rx_drop_mode_set(adapter); + + ret = DCB_HW_CHG; + } + + adapter->dcb_ctxt.cee_cfg_bitmap = 0x0; 
+l_end: + return ret; +} + +static u8 sxe_dcbnl_all_set(struct net_device *netdev) +{ + return sxe_dcbnl_cee_configure(netdev); +} + +static u8 sxe_dcbnl_cap_get(struct net_device *netdev, int capid, u8 *cap) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + switch (capid) { + case DCB_CAP_ATTR_PG: + case DCB_CAP_ATTR_PFC: + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_DCBX: + *cap = adapter->dcb_ctxt.dcbx_cap; + break; + default: + *cap = false; + break; + } + LOG_DEBUG_BDF("get dcb cap=%x\n", *cap); + + return 0; +} + +static int sxe_dcbnl_num_tcs_get(struct net_device *netdev, int tcid, u8 *num) +{ + int ret = 0; + struct sxe_adapter *adapter = netdev_priv(netdev); + + if (adapter->cap & SXE_DCB_ENABLE) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + *num = adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = adapter->dcb_ctxt.cee_cfg.num_tcs.pfc_tcs; + break; + default: + LOG_ERROR_BDF("feature dont support=%x\n", tcid); + ret = -EINVAL; + } + } else { + LOG_ERROR_BDF("dcb disable\n"); + ret = -EINVAL; + } + + LOG_DEBUG_BDF("tcid=%x, tcs=%u\n", tcid, *num); + + return ret; +} + +static int sxe_dcbnl_num_tcs_set(struct net_device *netdev, int tcid, u8 num) +{ + LOG_WARN("configuring tc is not supported\n"); + return -EINVAL; +} + +static u8 sxe_dcbnl_pfc_state_get(struct net_device *netdev) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + LOG_DEBUG_BDF("pfc state=%x\n", adapter->dcb_ctxt.cee_cfg.pfc_mode_enable); + + return adapter->dcb_ctxt.cee_cfg.pfc_mode_enable; +} + +static void sxe_dcbnl_pfc_state_set(struct net_device *netdev, u8 state) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + LOG_DEBUG_BDF("current pfc state=%x, set state=%x\n", + adapter->dcb_ctxt.cee_cfg.pfc_mode_enable, state); + + adapter->dcb_ctxt.cee_temp_cfg.pfc_mode_enable = state; + + return; +} + +#ifdef DCBNL_OPS_GETAPP_RETURN_U8 +static u8 sxe_dcbnl_app_get(struct net_device *netdev, u8 idtype, u16 id) +#else +static int sxe_dcbnl_app_get(struct net_device *netdev, u8 idtype, u16 id) +#endif +{ + int ret; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; + + if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_CEE)) { + LOG_DEBUG_BDF("not cee mode, not supported get\n"); +#ifdef DCBNL_OPS_GETAPP_RETURN_U8 + ret = 0; +#else + ret = -EINVAL; +#endif + goto l_end; + } + + ret = dcb_getapp(netdev, &app); + LOG_DEBUG_BDF("idtype=%x, id=%x, app=%x\n", idtype, id, ret); + +l_end: + return ret; +} + +static void sxe_dcbnl_devreset(struct net_device *dev) +{ + s32 ret; + struct sxe_adapter *adapter = netdev_priv(dev); + + while (test_and_set_bit(SXE_RESETTING, &adapter->state)) { + usleep_range(1000, 2000); + } + + if (netif_running(dev)) { + dev->netdev_ops->ndo_stop(dev); + } + + sxe_ring_irq_exit(adapter); + ret = sxe_ring_irq_init(adapter); + if (ret) { + LOG_ERROR_BDF("interrupt ring assign scheme init failed, err=%d\n", ret); + goto l_end; + } + + if (netif_running(dev)) { + dev->netdev_ops->ndo_open(dev); + } + + clear_bit(SXE_RESETTING, &adapter->state); + LOG_DEBUG_BDF("dcbnl reset finish\n"); +l_end: + return; +} + +static int sxe_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + struct ieee_ets *hw_ets = 
adapter->dcb_ctxt.ieee_ets; + + ets->ets_cap = adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs; + + if (!hw_ets) { + LOG_DEBUG_BDF("dont have ets cfg\n"); + goto l_end; + } + + ets->cbs = hw_ets->cbs; + memcpy(ets->tc_tx_bw, hw_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, hw_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); + memcpy(ets->tc_tsa, hw_ets->tc_tsa, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, hw_ets->prio_tc, sizeof(ets->prio_tc)); + + LOG_DEBUG_BDF("get ets cfg ok\n"); + +l_end: + return 0; +} + +static int sxe_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + int ret; + u32 i; + u8 max_tc = 0; + u8 map_chg = 0; + u32 max_frame = dev->mtu + SXE_ETH_DEAD_LOAD; + struct sxe_adapter *adapter = netdev_priv(dev); + struct sxe_hw *hw = &adapter->hw; + + LOG_DEBUG_BDF("set ets\n"); + + if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) { + LOG_ERROR_BDF("not ieee, dont support\n"); + ret = -EINVAL; + goto l_end; + } + + if (!adapter->dcb_ctxt.ieee_ets) { + adapter->dcb_ctxt.ieee_ets = + kmalloc(sizeof(struct ieee_ets), GFP_KERNEL); + if (!adapter->dcb_ctxt.ieee_ets) { + LOG_ERROR_BDF("kmalloc failed\n"); + ret = -ENOMEM; + goto l_end; + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + adapter->dcb_ctxt.ieee_ets->prio_tc[i] = + IEEE_8021QAZ_MAX_TCS; + } + + hw->dma.ops->dcb_rx_up_tc_map_get(hw, + adapter->dcb_ctxt.ieee_ets->prio_tc); + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] > max_tc) { + max_tc = ets->prio_tc[i]; + } + + if (ets->prio_tc[i] != \ + adapter->dcb_ctxt.ieee_ets->prio_tc[i]) { + map_chg = 1; + } + } + + memcpy(adapter->dcb_ctxt.ieee_ets, + ets, sizeof(*adapter->dcb_ctxt.ieee_ets)); + + if (max_tc) { + max_tc++; + } + + if (max_tc > adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs) { + LOG_ERROR_BDF("set tc=%u > max tc=%u\n", max_tc, + adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs); + ret = -EINVAL; + goto l_end; + } + + if (max_tc != adapter->dcb_ctxt.hw_tcs) { + ret = sxe_ring_reassign(adapter, max_tc); + if (ret) { + LOG_ERROR_BDF("ring reassign failed, ret=%d\n", ret); + goto l_end; + } + } else if (map_chg) { + sxe_dcbnl_devreset(dev); + } + + ret = sxe_dcb_hw_ieee_ets_configure(&adapter->hw, ets, max_frame); + if (ret) { + LOG_ERROR_BDF("ets config failed, max_frame=%u, ret=%u\n", + max_frame, ret); + } + +l_end: + return ret; +} + +static int sxe_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + struct ieee_pfc *hw_pfc = adapter->dcb_ctxt.ieee_pfc; + + pfc->pfc_cap = adapter->dcb_ctxt.cee_cfg.num_tcs.pfc_tcs; + + if (!hw_pfc) { + LOG_DEBUG_BDF("dont have pfc cfg\n"); + goto l_end; + } + + pfc->pfc_en = hw_pfc->pfc_en; + pfc->mbc = hw_pfc->mbc; + pfc->delay = hw_pfc->delay; + + LOG_DEBUG_BDF("get pfc cfg ok\n"); + +l_end: + return 0; +} + +static int sxe_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + int ret = 0; + u8 *prio_tc; + struct sxe_adapter *adapter = netdev_priv(dev); + struct sxe_hw *hw = &adapter->hw; + + if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) { + LOG_ERROR_BDF("not ieee, dont support\n"); + ret = -EINVAL; + goto l_end; + } + + if (!adapter->dcb_ctxt.ieee_pfc) { + adapter->dcb_ctxt.ieee_pfc = kmalloc(sizeof(struct ieee_pfc), + GFP_KERNEL); + if (!adapter->dcb_ctxt.ieee_pfc) { + LOG_ERROR_BDF("kmalloc failed\n"); + ret = -ENOMEM; + goto l_end; + } + } + + prio_tc = adapter->dcb_ctxt.ieee_ets->prio_tc; + memcpy(adapter->dcb_ctxt.ieee_pfc, pfc, + sizeof(*adapter->dcb_ctxt.ieee_pfc)); + + if 
(pfc->pfc_en) { + sxe_dcb_hw_pfc_configure(hw, pfc->pfc_en, prio_tc); + } else { + sxe_fc_enable(adapter); + } + + sxe_rx_drop_mode_set(adapter); + +l_end: + return ret; +} + +static int sxe_dcbnl_ieee_setapp(struct net_device *dev, + struct dcb_app *app) +{ + int ret; + u32 vf; + struct sxe_vf_info *vfinfo; + struct sxe_adapter *adapter = netdev_priv(dev); + struct sxe_hw *hw = &adapter->hw; + + if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) { + LOG_ERROR_BDF("not ieee, dont support\n"); + ret = -EINVAL; + goto l_end; + } + + ret = dcb_ieee_setapp(dev, app); + if (ret) { + LOG_ERROR_BDF("set app failed, ret=%d\n", ret); + goto l_end; + } + + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == 0) { + adapter->dcb_ctxt.default_up = app->priority; + + for (vf = 0; vf < adapter->vt_ctxt.num_vfs; vf++) { + vfinfo = &adapter->vt_ctxt.vf_info[vf]; + + if (!vfinfo->pf_qos) { + hw->dma.ops->tx_vlan_tag_set(hw, vfinfo->pf_vlan, + app->priority, vf); + } + } + } + + LOG_DEBUG_BDF("set app ok\n"); + +l_end: + return ret; +} + +static int sxe_dcbnl_ieee_delapp(struct net_device *dev, + struct dcb_app *app) +{ + int ret; + u32 vf; + u16 qos; + long unsigned int app_mask; + struct sxe_vf_info *vfinfo; + struct sxe_adapter *adapter = netdev_priv(dev); + struct sxe_hw *hw = &adapter->hw; + + if (!(adapter->dcb_ctxt.dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) { + LOG_ERROR_BDF("not ieee, dont support\n"); + ret = -EINVAL; + goto l_end; + } + + ret = dcb_ieee_delapp(dev, app); + if (ret) { + LOG_ERROR_BDF("del app failed, ret=%d\n", ret); + goto l_end; + } + + if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == 0 && adapter->dcb_ctxt.default_up == app->priority) { + app_mask = dcb_ieee_getapp_mask(dev, app); + qos = app_mask ? 
find_first_bit(&app_mask, 8) : 0; + + adapter->dcb_ctxt.default_up = qos; + + for (vf = 0; vf < adapter->vt_ctxt.num_vfs; vf++) { + vfinfo = &adapter->vt_ctxt.vf_info[vf]; + + if (!vfinfo->pf_qos) { + hw->dma.ops->tx_vlan_tag_set(hw, vfinfo->pf_vlan, + qos, vf); + } + } + } + + LOG_DEBUG_BDF("del app ok\n"); + +l_end: + return ret; +} + +static u8 sxe_dcbnl_dcbx_get(struct net_device *dev) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + + LOG_DEBUG_BDF("dcbx cap=%x\n", adapter->dcb_ctxt.dcbx_cap); + + return adapter->dcb_ctxt.dcbx_cap; +} + +static u8 sxe_dcbnl_dcbx_set(struct net_device *dev, u8 mode) +{ + u8 ret = 0; + s32 err = 0; + struct ieee_ets ets = {0}; + struct ieee_pfc pfc = {0}; + struct sxe_adapter *adapter = netdev_priv(dev); + + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + !(mode & DCB_CAP_DCBX_HOST)) { + LOG_ERROR_BDF("dont support mode=%x\n", mode); + ret = 1; + goto l_end; + } + + if (mode == adapter->dcb_ctxt.dcbx_cap) { + goto l_end; + } + + adapter->dcb_ctxt.dcbx_cap = mode; + + ets.ets_cap = 8; + pfc.pfc_cap = 8; + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + sxe_dcbnl_ieee_setets(dev, &ets); + sxe_dcbnl_ieee_setpfc(dev, &pfc); + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG; + + adapter->dcb_ctxt.cee_cfg_bitmap |= mask; + sxe_dcbnl_cee_configure(dev); + } else { + sxe_dcbnl_ieee_setets(dev, &ets); + sxe_dcbnl_ieee_setpfc(dev, &pfc); + err = sxe_ring_reassign(adapter, 0); + if (err) { + LOG_ERROR_BDF("ring reassign failed, err=%d\n", err); + ret = 1; + } + } +l_end: + return ret; +} + +const struct dcbnl_rtnl_ops sxe_dcbnl_ops = { + .ieee_getets = sxe_dcbnl_ieee_getets, + .ieee_setets = sxe_dcbnl_ieee_setets, + .ieee_getpfc = sxe_dcbnl_ieee_getpfc, + .ieee_setpfc = sxe_dcbnl_ieee_setpfc, + .ieee_setapp = sxe_dcbnl_ieee_setapp, + .ieee_delapp = sxe_dcbnl_ieee_delapp, + + .getstate = sxe_dcbnl_state_get, + .setstate = sxe_dcbnl_state_set, + .getpermhwaddr = sxe_dcbnl_perm_addr_get, + .setpgtccfgtx = sxe_dcbnl_tx_pg_tc_cfg_set, + .setpgbwgcfgtx = sxe_dcbnl_tx_pg_bwg_cfg_set, + .setpgtccfgrx = sxe_dcbnl_rx_pg_tc_cfg_set, + .setpgbwgcfgrx = sxe_dcbnl_rx_pg_bwg_cfg_set, + .getpgtccfgtx = sxe_dcbnl_tx_pg_tc_cfg_get, + .getpgbwgcfgtx = sxe_dcbnl_tx_pg_bwg_cfg_get, + .getpgtccfgrx = sxe_dcbnl_rx_pg_tc_cfg_get, + .getpgbwgcfgrx = sxe_dcbnl_rx_pg_bwg_cfg_get, + .setpfccfg = sxe_dcbnl_pfc_cfg_set, + .getpfccfg = sxe_dcbnl_pfc_cfg_get, + .setall = sxe_dcbnl_all_set, + .getcap = sxe_dcbnl_cap_get, + .getnumtcs = sxe_dcbnl_num_tcs_get, + .setnumtcs = sxe_dcbnl_num_tcs_set, + .getpfcstate = sxe_dcbnl_pfc_state_get, + .setpfcstate = sxe_dcbnl_pfc_state_set, + .getapp = sxe_dcbnl_app_get, + .getdcbx = sxe_dcbnl_dcbx_get, + .setdcbx = sxe_dcbnl_dcbx_set, +}; +#endif + +void sxe_rx_drop_mode_set(struct sxe_adapter *adapter) +{ + u32 i; + struct sxe_hw *hw = &adapter->hw; + bool pfc_en = adapter->dcb_ctxt.cee_cfg.pfc_mode_enable; + u32 current_mode = hw->mac.ops->fc_current_mode_get(hw); + +#ifdef SXE_DCB_CONFIGURE + if (adapter->dcb_ctxt.ieee_pfc) { + pfc_en |= !!(adapter->dcb_ctxt.ieee_pfc->pfc_en); + } +#endif + + if (adapter->vt_ctxt.num_vfs || (adapter->rx_ring_ctxt.num > 1 && + !(current_mode & SXE_FC_TX_PAUSE) && !pfc_en)) { + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + hw->dma.ops->rx_drop_switch(hw, + adapter->rx_ring_ctxt.ring[i]->reg_idx, true); + } + } else { + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + hw->dma.ops->rx_drop_switch(hw, 
+ adapter->rx_ring_ctxt.ring[i]->reg_idx, false); + } + } + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_debug.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_debug.c new file mode 100644 index 000000000000..4b1fcf8f77b4 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_debug.c @@ -0,0 +1,26 @@ + +#include +#include +#include + +#include "sxe_debug.h" + +#define SKB_DESCRIPTION_LEN 256 +void sxe_dump_skb(struct sk_buff *skb) +{ +#ifndef SXE_DRIVER_RELEASE + u32 len = skb->len; + u32 data_len = skb->data_len; +#endif + + s8 desc[SKB_DESCRIPTION_LEN] = {}; + + snprintf(desc, SKB_DESCRIPTION_LEN, + "skb addr:0x%llx %s", (u64)skb, "linear region"); +#ifndef SXE_DRIVER_RELEASE + sxe_log_binary(__FILE__, __FUNCTION__, __LINE__, + (u8 *)skb->data, (u64)skb, min_t(u32, len - data_len, 256), desc); +#endif + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_debug.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_debug.h new file mode 100644 index 000000000000..6355cad0df96 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_debug.h @@ -0,0 +1,17 @@ + +#ifndef __SXE_DEBUG_H__ +#define __SXE_DEBUG_H__ + +#include +#include "sxe_log.h" + +void sxe_dump_skb(struct sk_buff *skb); + +#if defined SXE_DRIVER_RELEASE +#define SKB_DUMP(skb) +#else +#define SKB_DUMP(skb) sxe_dump_skb(skb) +#endif + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_debugfs.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_debugfs.c new file mode 100644 index 000000000000..9a2103efc7f2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_debugfs.c @@ -0,0 +1,427 @@ +#include +#include + +#include "sxe.h" +#include "sxe_netdev.h" +#include "sxe_version.h" +#include "sxe_phy.h" + +#define SXE_HW_STAT(m) sizeof(((struct sxe_adapter *)0)->m), \ + offsetof(struct sxe_adapter, m) + +#define SXE_HW_STATS_LEN ARRAY_SIZE(hw_stats) + +struct sxe_debugfs_hw_stats { + const char *stat_string; + int sizeof_stat; + int stat_offset; +}; + +static const struct sxe_debugfs_hw_stats hw_stats[] = { + {"rx_good_pkts", SXE_HW_STAT(stats.hw.gprc)}, + {"tx_good_pkts", SXE_HW_STAT(stats.hw.gptc)}, + {"rx_good_bytes", SXE_HW_STAT(stats.hw.gorc)}, + {"tx_good_bytes", SXE_HW_STAT(stats.hw.gotc)}, + {"rx_broadcast", SXE_HW_STAT(stats.hw.bprc)}, + {"tx_broadcast", SXE_HW_STAT(stats.hw.bptc)}, + {"rx_multicast", SXE_HW_STAT(stats.hw.mprc)}, + {"tx_multicast", SXE_HW_STAT(stats.hw.mptc)}, + {"fnav_match", SXE_HW_STAT(stats.hw.fnavmatch)}, + {"fnav_miss", SXE_HW_STAT(stats.hw.fnavmiss)}, + {"rx_64_bytes", SXE_HW_STAT(stats.hw.prc64)}, + {"rx_65~127_bytes", SXE_HW_STAT(stats.hw.prc127)}, + {"rx_128~255_bytes", SXE_HW_STAT(stats.hw.prc255)}, + {"rx_256~511_bytes", SXE_HW_STAT(stats.hw.prc511)}, + {"rx_512~1023_bytes", SXE_HW_STAT(stats.hw.prc1023)}, + {"rx_1024~1522_bytes", SXE_HW_STAT(stats.hw.prc1522)}, + {"tx_64_bytes", SXE_HW_STAT(stats.hw.ptc64)}, + {"tx_65~127_bytes", SXE_HW_STAT(stats.hw.ptc127)}, + {"tx_128~255_bytes", SXE_HW_STAT(stats.hw.ptc255)}, + {"tx_256~511_bytes", SXE_HW_STAT(stats.hw.ptc511)}, + {"tx_512~1023_bytes", SXE_HW_STAT(stats.hw.ptc1023)}, + {"tx_1024~1522_bytes", SXE_HW_STAT(stats.hw.ptc1522)}, + {"rx_total_pkts", SXE_HW_STAT(stats.hw.tpr)}, + {"tx_total_pkts", SXE_HW_STAT(stats.hw.tpt)}, + {"rx_total_bytes", SXE_HW_STAT(stats.hw.tor)}, + {"rx_long_length_errors", SXE_HW_STAT(stats.hw.roc)}, + {"rx_short_length_errors", SXE_HW_STAT(stats.hw.ruc)}, + {"rx_short_length_with_bad_crc_errors", SXE_HW_STAT(stats.hw.rfc)}, + {"rx_crc_error", 
SXE_HW_STAT(stats.hw.crcerrs)}, + {"rx_error_byte", SXE_HW_STAT(stats.hw.errbc)}, + {"rx_length_errors", SXE_HW_STAT(stats.hw.rlec)}, + {"rx_jabber_errors", SXE_HW_STAT(stats.hw.rjc)}, +}; + +static struct dentry *sxe_debugfs_root; + +STATIC s8 sxe_debugfs_reg_ops_buf[256] = ""; + +STATIC ssize_t sxe_debugfs_common_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos, + char *debugfs_buf) +{ + s8 *buf; + ssize_t ret; + struct sxe_adapter *adapter = filp->private_data; + + if (*ppos != 0) { + ret = 0; + goto l_end; + } + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", + adapter->netdev->name, debugfs_buf); + if (!buf) { + ret = -ENOMEM; + goto l_end; + } + + if (count < strlen(buf)) { + ret = -ENOSPC; + goto l_free; + } + + ret = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + +l_free: + kfree(buf); +l_end: + return ret; +} + +STATIC ssize_t sxe_debugfs_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + return sxe_debugfs_common_ops_read(filp, buffer, count, ppos, + sxe_debugfs_reg_ops_buf); +} + +STATIC ssize_t sxe_debugfs_reg_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + ssize_t ret; + s32 cnt; + u32 reg, value; + struct sxe_adapter *adapter = filp->private_data; + struct sxe_hw *hw = &adapter->hw; + + if (*ppos != 0) { + ret = 0; + goto l_end; + } + + if (count >= sizeof(sxe_debugfs_reg_ops_buf)) { + ret = -ENOSPC; + goto l_end; + } + + ret = simple_write_to_buffer(sxe_debugfs_reg_ops_buf, + sizeof(sxe_debugfs_reg_ops_buf)-1, + ppos, buffer, count); + if (ret < 0) { + goto l_end; + } + + sxe_debugfs_reg_ops_buf[ret] = '\0'; + + if (strncmp(sxe_debugfs_reg_ops_buf, "write", 5) == 0) { + cnt = sscanf(&sxe_debugfs_reg_ops_buf[5], "%x %x", ®, &value); + if (cnt != 2) { + LOG_DEV_INFO("write \n"); + ret = count; + goto l_end; + } + + if (reg >= pci_resource_len(adapter->pdev, 0)) { + LOG_DEV_INFO("write ops : reg addr err, " + "addr[%x]>bar0 max addr[0x100000]", reg); + ret = -EINVAL; + goto l_end; + } + + hw->setup.ops->reg_write(hw, reg, value); + value = hw->setup.ops->reg_read(hw, reg); + LOG_DEV_INFO("write: 0x%08x = 0x%08x\n", reg, value); + + } else if (strncmp(sxe_debugfs_reg_ops_buf, "read", 4) == 0) { + cnt = sscanf(&sxe_debugfs_reg_ops_buf[4], "%x", ®); + if (cnt != 1) { + LOG_DEV_INFO("read \n"); + ret = count; + goto l_end; + } + + if (reg >= pci_resource_len(adapter->pdev, 0)) { + LOG_DEV_INFO("read ops : reg addr err, " + "addr[%x]>bar0 max addr[0x100000]", reg); + ret = -EINVAL; + goto l_end; + } + + value = hw->setup.ops->reg_read(hw, reg); + LOG_DEV_INFO("read 0x%08x = 0x%08x\n", reg, value); + } else { + LOG_DEV_INFO("unknown command %s\n", sxe_debugfs_reg_ops_buf); + LOG_DEV_INFO("available commands:\n"); + LOG_DEV_INFO(" read \n"); + LOG_DEV_INFO(" write \n"); + } + + ret = count; + +l_end: + return ret; +} + +static const struct file_operations sxe_debugfs_reg_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = sxe_debugfs_reg_ops_read, + .write = sxe_debugfs_reg_ops_write, +}; + +STATIC s8 debugfs_netdev_buf[256] = ""; + +STATIC ssize_t sxe_debugfs_netdev_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + return sxe_debugfs_common_ops_read(filp, buffer, count, ppos, + debugfs_netdev_buf); +} + +STATIC ssize_t sxe_debugfs_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + ssize_t ret; + struct sxe_adapter *adapter = filp->private_data; + + if (*ppos != 
0) { + ret = 0; + goto l_end; + } + if (count >= sizeof(debugfs_netdev_buf)) { + ret = -ENOSPC; + goto l_end; + } + + ret = simple_write_to_buffer(debugfs_netdev_buf, + sizeof(debugfs_netdev_buf)-1, + ppos, buffer, count); + if (ret < 0) { + goto l_end; + } + + debugfs_netdev_buf[ret] = '\0'; + + if (0 == strncmp(debugfs_netdev_buf, "tx_timeout", 10)) { + +#ifdef HAVE_TIMEOUT_TXQUEUE_IDX + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev, UINT_MAX); +#else + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); +#endif + LOG_DEV_INFO("tx_timeout called\n"); + } else { + LOG_DEV_INFO("unknown command: %s\n", debugfs_netdev_buf); + LOG_DEV_INFO("available commands:\n"); + LOG_DEV_INFO(" tx_timeout\n"); + } + + ret = count; + +l_end: + return ret; +} + +static const struct file_operations sxe_debugfs_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = sxe_debugfs_netdev_ops_read, + .write = sxe_debugfs_netdev_ops_write, +}; + +STATIC ssize_t sxe_debugfs_hw_stats_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + u32 i; + u64 value; + s8 *offset; + struct sxe_debugfs_hw_stats *hw_stats_t; + struct sxe_adapter *adapter = filp->private_data; + hw_stats_t = kzalloc(sizeof(struct sxe_debugfs_hw_stats) * SXE_HW_STATS_LEN, GFP_ATOMIC); + + stats_lock(adapter); + sxe_stats_update(adapter); + memcpy(hw_stats_t, hw_stats, sizeof(struct sxe_debugfs_hw_stats) * SXE_HW_STATS_LEN); + stats_unlock(adapter); + + for (i = 0; i < SXE_HW_STATS_LEN; i++) { + offset = (s8 *) adapter + hw_stats_t[i].stat_offset; + + value = (hw_stats_t[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)offset : *(u32 *)offset; + + LOG_DEV_INFO("%s: %llu\n", hw_stats_t[i].stat_string, value); + } + kfree(hw_stats_t); + + return 0; +} + +static const struct file_operations sxe_debugfs_hw_stats_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = sxe_debugfs_hw_stats_read, +}; + +STATIC ssize_t sxe_debugfs_sfp_info_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct sxe_adapter *adapter = filp->private_data; + + s32 ret; + enum sxe_sfp_type sfp_type; + u8 sfp_comp_code[SXE_SFP_COMP_CODE_SIZE]; + u8 sfp_vendor_pn[SXE_SFP_VENDOR_PN_SIZE + 1] = {0}; + + LOG_INFO_BDF("sfp identify start\n"); + + ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_BASE_ADDR, + SXE_SFP_COMP_CODE_SIZE, sfp_comp_code); + if (ret) { + sfp_type = SXE_SFP_TYPE_NOT_PRESENT; + LOG_DEV_ERR("get sfp identifier failed, ret=%d\n", ret); + goto l_end; + } + + LOG_DEV_INFO("sfp identifier=%x, cable_technology=%x, " + "10GB_code=%x, 1GB_code=%x\n", + sfp_comp_code[SXE_SFF_IDENTIFIER], + sfp_comp_code[SXE_SFF_CABLE_TECHNOLOGY], + sfp_comp_code[SXE_SFF_10GBE_COMP_CODES], + sfp_comp_code[SXE_SFF_1GBE_COMP_CODES]); + + if (sfp_comp_code[SXE_SFF_IDENTIFIER] != SXE_SFF_IDENTIFIER_SFP) { + LOG_DEV_ERR("module type is not sfp/sfp+, offset=%d, type=%x\n", + SXE_SFF_IDENTIFIER, sfp_comp_code[SXE_SFF_IDENTIFIER]); + sfp_type = SXE_SFP_TYPE_UNKNOWN; + goto l_end; + } + + if (sfp_comp_code[SXE_SFF_CABLE_TECHNOLOGY] & SXE_SFF_DA_PASSIVE_CABLE) { + sfp_type = SXE_SFP_TYPE_DA_CU; + } else if (sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \ + (SXE_SFF_10GBASESR_CAPABLE | SXE_SFF_10GBASELR_CAPABLE)) { + sfp_type = SXE_SFP_TYPE_SRLR; + } else if (sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \ + SXE_SFF_1GBASET_CAPABLE) { + sfp_type = SXE_SFP_TYPE_1G_CU; + } else if ((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \ + SXE_SFF_1GBASESX_CAPABLE) || \ + (sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \ + 
SXE_SFF_1GBASELX_CAPABLE)) { + sfp_type = SXE_SFP_TYPE_1G_SXLX; + } else { + sfp_type = SXE_SFP_TYPE_UNKNOWN; + } + + LOG_DEV_INFO("identify sfp, sfp_type=%d\n", sfp_type); + + if (((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \ + SXE_SFF_1GBASESX_CAPABLE) && + (sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \ + SXE_SFF_10GBASESR_CAPABLE)) || + ((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \ + SXE_SFF_1GBASELX_CAPABLE) && + (sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \ + SXE_SFF_10GBASELR_CAPABLE))) { + LOG_DEV_INFO("identify sfp, sfp is multispeed\n"); + } else { + LOG_DEV_INFO("identify sfp, sfp is not multispeed\n"); + } + + ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_VENDOR_PN, + SXE_SFP_VENDOR_PN_SIZE, sfp_vendor_pn); + if (ret) { + LOG_DEV_ERR("get sfp vendor pn failed, ret=%d\n", ret); + goto l_end; + } + + LOG_DEV_INFO("sfp vendor pn: %s\n", sfp_vendor_pn); + + ret = sxe_sfp_vendor_pn_cmp(sfp_vendor_pn); + if (0 == ret) { + LOG_DEV_WARN("an supported SFP module type was detected\n"); + goto l_end; + } + + LOG_DEV_WARN("an unsupported SFP module type was detected\n"); + LOG_DEV_WARN("refer to the sxe ethernet adapters and devices user " + "guide for a list of supported modules\n"); + +l_end: + return 0; +} + +static const struct file_operations sxe_debugfs_sfp_info_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = sxe_debugfs_sfp_info_read, +}; + +void sxe_debugfs_entries_init(struct sxe_adapter *adapter) +{ + struct dentry *dir; + const char *name = pci_name(adapter->pdev); + + adapter->debugfs_entries = debugfs_create_dir(name, sxe_debugfs_root); + dir = debugfs_create_file("reg_ops", 0600, adapter->debugfs_entries, + adapter, &sxe_debugfs_reg_ops_fops); + if ((dir == NULL) || (dir == ERR_PTR(-ENODEV))) { + LOG_INFO_BDF("debugfs:reg_ops file create failed\n"); + } + + dir = debugfs_create_file("netdev_ops", 0600, adapter->debugfs_entries, + adapter, &sxe_debugfs_netdev_ops_fops); + if ((dir == NULL) || (dir == ERR_PTR(-ENODEV))) { + LOG_INFO_BDF("debugfs:netdev_ops file create failed\n"); + } + + dir = debugfs_create_file("hw_stats", 0400, adapter->debugfs_entries, + adapter, &sxe_debugfs_hw_stats_fops); + if ((dir == NULL) || (dir == ERR_PTR(-ENODEV))) { + LOG_INFO_BDF("debugfs:hw_stats file create failed\n"); + } + + dir = debugfs_create_file("sfp_info", 0400, adapter->debugfs_entries, + adapter, &sxe_debugfs_sfp_info_fops); + if ((dir == NULL) || (dir == ERR_PTR(-ENODEV))) { + LOG_INFO_BDF("debugfs:sfp_info file create failed\n"); + } + + return; +} + +void sxe_debugfs_entries_exit(struct sxe_adapter *adapter) +{ + debugfs_remove_recursive(adapter->debugfs_entries); + adapter->debugfs_entries = NULL; + + return; +} + +void sxe_debugfs_init(void) +{ + sxe_debugfs_root = debugfs_create_dir(SXE_DRV_NAME, NULL); + + return; +} + +void sxe_debugfs_exit(void) +{ + debugfs_remove_recursive(sxe_debugfs_root); + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_debugfs.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_debugfs.h new file mode 100644 index 000000000000..ac7f31d37ab9 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_debugfs.h @@ -0,0 +1,14 @@ +#ifndef __SXE_DEBUGFS_H__ +#define __SXE_DEBUGFS_H__ + +struct sxe_adapter; + +void sxe_debugfs_entries_init(struct sxe_adapter *adapter); + +void sxe_debugfs_entries_exit(struct sxe_adapter *adapter); + +void sxe_debugfs_init(void); + +void sxe_debugfs_exit(void); + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_errno.h 
b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_errno.h new file mode 100644 index 000000000000..70724225cc17 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_errno.h @@ -0,0 +1,58 @@ + +#ifndef __SXE_ERRNO_H__ +#define __SXE_ERRNO_H__ + +#define SXE_ERR_MODULE_STANDARD 0 +#define SXE_ERR_MODULE_PF 1 +#define SXE_ERR_MODULE_VF 2 +#define SXE_ERR_MODULE_HDC 3 + +#define SXE_ERR_MODULE_OFFSET 16 +#define SXE_ERR_MODULE(module, errcode) \ + ((module << SXE_ERR_MODULE_OFFSET) | errcode) +#define SXE_ERR_PF(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_PF, errcode) +#define SXE_ERR_VF(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_VF, errcode) +#define SXE_ERR_HDC(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_HDC, errcode) + +#define SXE_ERR_CONFIG EINVAL +#define SXE_ERR_PARAM EINVAL +#define SXE_ERR_RESET_FAILED EPERM +#define SXE_ERR_NO_SPACE ENOSPC +#define SXE_ERR_FNAV_CMD_INCOMPLETE EBUSY +#define SXE_ERR_MBX_LOCK_FAIL EBUSY +#define SXE_ERR_OPRATION_NOT_PERM EPERM +#define SXE_ERR_LINK_STATUS_INVALID EINVAL +#define SXE_ERR_LINK_SPEED_INVALID EINVAL +#define SXE_ERR_DEVICE_NOT_SUPPORTED EOPNOTSUPP +#define SXE_ERR_HDC_LOCK_BUSY EBUSY +#define SXE_ERR_HDC_FW_OV_TIMEOUT ETIMEDOUT +#define SXE_ERR_MDIO_CMD_TIMEOUT ETIMEDOUT +#define SXE_ERR_INVALID_LINK_SETTINGS EINVAL +#define SXE_ERR_FNAV_REINIT_FAILED EIO +#define SXE_ERR_CLI_FAILED EIO +#define SXE_ERR_MASTER_REQUESTS_PENDING SXE_ERR_PF(1) +#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT SXE_ERR_PF(2) +#define SXE_ERR_ENABLE_SRIOV_FAIL SXE_ERR_PF(3) +#define SXE_ERR_IPSEC_SA_STATE_NOT_EXSIT SXE_ERR_PF(4) +#define SXE_ERR_SFP_NOT_PERSENT SXE_ERR_PF(5) +#define SXE_ERR_PHY_NOT_PERSENT SXE_ERR_PF(6) +#define SXE_ERR_PHY_RESET_FAIL SXE_ERR_PF(7) +#define SXE_ERR_FC_NOT_NEGOTIATED SXE_ERR_PF(8) +#define SXE_ERR_SFF_NOT_SUPPORTED SXE_ERR_PF(9) + +#define SXEVF_ERR_MAC_ADDR_INVALID EINVAL +#define SXEVF_ERR_RESET_FAILED EIO +#define SXEVF_ERR_ARGUMENT_INVALID EINVAL +#define SXEVF_ERR_NOT_READY EBUSY +#define SXEVF_ERR_POLL_ACK_FAIL EIO +#define SXEVF_ERR_POLL_MSG_FAIL EIO +#define SXEVF_ERR_MBX_LOCK_FAIL EBUSY +#define SXEVF_ERR_REPLY_INVALID EINVAL +#define SXEVF_ERR_IRQ_NUM_INVALID EINVAL +#define SXEVF_ERR_PARAM EINVAL +#define SXEVF_ERR_MAILBOX_FAIL SXE_ERR_VF(1) +#define SXEVF_ERR_MSG_HANDLE_ERR SXE_ERR_VF(2) +#define SXEVF_ERR_DEVICE_NOT_SUPPORTED SXE_ERR_VF(3) +#define SXEVF_ERR_IPSEC_SA_STATE_NOT_EXSIT SXE_ERR_VF(4) + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ethtool.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ethtool.c new file mode 100644 index 000000000000..829ffe007a3c --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ethtool.c @@ -0,0 +1,2769 @@ +#include + +#include "sxe_ethtool.h" +#include "sxe_hw.h" +#include "sxe_log.h" +#include "sxe_version.h" +#include "sxe_tx_proc.h" +#include "sxe_netdev.h" +#include "sxe_rx_proc.h" +#include "sxe_filter.h" +#include "sxe_irq.h" +#include "sxe_host_hdc.h" +#include "sxe_phy.h" +#include "sxe_cli.h" + +enum sxe_diag_test_case{ + SXE_DIAG_REGS_TEST = 0, + SXE_DIAG_EEPROM_TEST = 1, + SXE_DIAG_IRQ_TEST = 2, + SXE_DIAG_LOOPBACK_TEST = 3, + SXE_DIAG_LINK_TEST = 4, + SXE_DIAG_TEST_MAX, +}; + +#define SXE_STAT(m) SXE_STATS, \ + sizeof(((struct sxe_adapter *)0)->m), \ + offsetof(struct sxe_adapter, m) +#define SXE_NETDEV_STAT(m) NETDEV_STATS, \ + sizeof(((struct rtnl_link_stats64 *)0)->m), \ + offsetof(struct rtnl_link_stats64, m) +#define SXE_MAC_STATS_OFFSET(m) offsetof(struct sxe_mac_stats, m) + +#define SXE_TEST_SLEEP_TIME (2) + +static const char 
sxe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +static const struct sxe_ethtool_stats sxe_gstrings_stats[] = { + {"rx_packets", SXE_NETDEV_STAT(rx_packets)}, + {"tx_packets", SXE_NETDEV_STAT(tx_packets)}, + {"rx_bytes", SXE_NETDEV_STAT(rx_bytes)}, + {"tx_bytes", SXE_NETDEV_STAT(tx_bytes)}, + {"rx_errors", SXE_NETDEV_STAT(rx_errors)}, + {"tx_errors", SXE_NETDEV_STAT(tx_errors)}, + {"rx_dropped", SXE_NETDEV_STAT(rx_dropped)}, + {"tx_dropped", SXE_NETDEV_STAT(tx_dropped)}, + {"multicast", SXE_NETDEV_STAT(multicast)}, + {"collisions", SXE_NETDEV_STAT(collisions)}, + {"rx_over_errors", SXE_NETDEV_STAT(rx_over_errors)}, + {"rx_crc_errors", SXE_NETDEV_STAT(rx_crc_errors)}, + {"rx_frame_errors", SXE_NETDEV_STAT(rx_frame_errors)}, + {"rx_fifo_errors", SXE_NETDEV_STAT(rx_fifo_errors)}, + {"rx_missed_errors", SXE_NETDEV_STAT(rx_missed_errors)}, + {"tx_aborted_errors", SXE_NETDEV_STAT(tx_aborted_errors)}, + {"tx_carrier_errors", SXE_NETDEV_STAT(tx_carrier_errors)}, + {"tx_fifo_errors", SXE_NETDEV_STAT(tx_fifo_errors)}, + {"tx_heartbeat_errors", SXE_NETDEV_STAT(tx_heartbeat_errors)}, + {"rx_pkts_nic", SXE_STAT(stats.hw.gprc)}, + {"tx_pkts_nic", SXE_STAT(stats.hw.gptc)}, + {"rx_bytes_nic", SXE_STAT(stats.hw.gorc)}, + {"tx_bytes_nic", SXE_STAT(stats.hw.gotc)}, + {"link_state_change_cnt", SXE_STAT(stats.sw.link_state_change_cnt)}, + {"tx_busy", SXE_STAT(stats.sw.tx_busy)}, + {"non_eop_descs", SXE_STAT(stats.sw.non_eop_descs)}, + {"broadcast", SXE_STAT(stats.hw.bprc)}, + {"hw_lro_aggregated", SXE_STAT(stats.sw.lro_total_count)}, + {"hw_lro_flushed", SXE_STAT(stats.sw.lro_total_flush)}, + {"fnav_match", SXE_STAT(stats.hw.fnavmatch)}, + {"fnav_miss", SXE_STAT(stats.hw.fnavmiss)}, + {"fnav_overflow", SXE_STAT(stats.sw.fnav_overflow)}, + {"reset_work_trigger", SXE_STAT(stats.sw.reset_work_trigger_cnt)}, + {"tx_restart_queue", SXE_STAT(stats.sw.restart_queue)}, + {"rx_length_errors", SXE_STAT(stats.hw.rlec)}, + {"rx_long_length_errors", SXE_STAT(stats.hw.roc)}, + {"rx_short_length_errors", SXE_STAT(stats.hw.ruc)}, + {"rx_csum_offload_errors", SXE_STAT(stats.sw.hw_csum_rx_error)}, + {"alloc_rx_page", SXE_STAT(stats.sw.alloc_rx_page)}, + {"alloc_rx_page_failed", SXE_STAT(stats.sw.alloc_rx_page_failed)}, + {"alloc_rx_buff_failed", SXE_STAT(stats.sw.alloc_rx_buff_failed)}, + {"rx_no_dma_resources", SXE_STAT(stats.hw.hw_rx_no_dma_resources)}, + {"tx_hwtstamp_timeouts", SXE_STAT(stats.sw.tx_hwtstamp_timeouts)}, + {"tx_hwtstamp_skipped", SXE_STAT(stats.sw.tx_hwtstamp_skipped)}, + {"rx_hwtstamp_cleared", SXE_STAT(stats.sw.rx_hwtstamp_cleared)}, + {"tx_ipsec", SXE_STAT(stats.sw.tx_ipsec)}, + {"rx_ipsec", SXE_STAT(stats.sw.rx_ipsec)}, +}; + +static const struct sxe_ethtool_stats sxe_gstrings_dma_stats[] = { + {"dma_good_rx_pkts", SXE_STAT(stats.hw.rxdgpc)}, + {"dma_good_rx_bytes", SXE_STAT(stats.hw.rxdgbc)}, + {"dma_good_tx_pkts", SXE_STAT(stats.hw.txdgpc)}, + {"dma_good_tx_bytes", SXE_STAT(stats.hw.txdgbc)}, + {"dma_dup_good_rx_pkts", SXE_STAT(stats.hw.rxddpc)}, + {"dma_dup_good_rx_bytes", SXE_STAT(stats.hw.rxddbc)}, + {"dma_vm_to_host_rx_pkts", SXE_STAT(stats.hw.rxlpbkpc)}, + {"dma_vm_to_host_rx_bytes", SXE_STAT(stats.hw.rxlpbkbc)}, + {"dma_dup_vm_to_host_pkts", SXE_STAT(stats.hw.rxdlpbkpc)}, + {"dma_dup_vm_to_host_bytes", SXE_STAT(stats.hw.rxdlpbkbc)}, + {"dbu_to_dma_rx_pkts", SXE_STAT(stats.hw.rxtpcing)}, + {"dma_to_host_rx_pkts", SXE_STAT(stats.hw.rxtpceng)}, + {"dma_rx_drop", 
SXE_STAT(stats.hw.prddc)}, + {"pcie_err_tx_pkts", SXE_STAT(stats.hw.txswerr)}, + {"vm_to_vm_tx_pkts", SXE_STAT(stats.hw.txswitch)}, + {"vm_to_vm_tx_dropped", SXE_STAT(stats.hw.txrepeat)}, + {"dma_tx_desc_err", SXE_STAT(stats.hw.txdescerr)}, +}; + +static const char sxe_priv_flags_strings[][ETH_GSTRING_LEN] = { +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC + "legacy-rx", +#endif +#ifdef SXE_IPSEC_CONFIGURE + "vf-ipsec", +#endif +}; + +static const struct sxe_mac_stats_info mac_stats[] = { + {"crcerrs", SXE_MAC_STATS_OFFSET(crcerrs)}, + {"errbc", SXE_MAC_STATS_OFFSET(errbc)}, + {"rlec", SXE_MAC_STATS_OFFSET(rlec)}, + {"prc64", SXE_MAC_STATS_OFFSET(prc64)}, + {"prc127", SXE_MAC_STATS_OFFSET(prc127)}, + {"prc255", SXE_MAC_STATS_OFFSET(prc255)}, + {"prc511", SXE_MAC_STATS_OFFSET(prc511)}, + {"prc1023", SXE_MAC_STATS_OFFSET(prc1023)}, + {"prc1522", SXE_MAC_STATS_OFFSET(prc1522)}, + {"bprc", SXE_MAC_STATS_OFFSET(bprc)}, + {"mprc", SXE_MAC_STATS_OFFSET(mprc)}, + {"gprc", SXE_MAC_STATS_OFFSET(gprc)}, + {"gorc", SXE_MAC_STATS_OFFSET(gorc)}, + {"ruc", SXE_MAC_STATS_OFFSET(ruc)}, + {"rfc", SXE_MAC_STATS_OFFSET(rfc)}, + {"roc", SXE_MAC_STATS_OFFSET(roc)}, + {"rjc", SXE_MAC_STATS_OFFSET(rjc)}, + {"tor", SXE_MAC_STATS_OFFSET(tor)}, + {"tpr", SXE_MAC_STATS_OFFSET(tpr)}, + {"gptc", SXE_MAC_STATS_OFFSET(gptc)}, + {"gotc", SXE_MAC_STATS_OFFSET(gotc)}, + {"tpt", SXE_MAC_STATS_OFFSET(tpt)}, + {"ptc64", SXE_MAC_STATS_OFFSET(ptc64)}, + {"ptc127", SXE_MAC_STATS_OFFSET(ptc127)}, + {"ptc255", SXE_MAC_STATS_OFFSET(ptc255)}, + {"ptc511", SXE_MAC_STATS_OFFSET(ptc511)}, + {"ptc1023", SXE_MAC_STATS_OFFSET(ptc1023)}, + {"ptc1522", SXE_MAC_STATS_OFFSET(ptc1522)}, + {"mptc", SXE_MAC_STATS_OFFSET(mptc)}, + {"bptc", SXE_MAC_STATS_OFFSET(bptc)}, + {"prcpf[0]", SXE_MAC_STATS_OFFSET(prcpf[0])}, + {"prcpf[1]", SXE_MAC_STATS_OFFSET(prcpf[1])}, + {"prcpf[2]", SXE_MAC_STATS_OFFSET(prcpf[2])}, + {"prcpf[3]", SXE_MAC_STATS_OFFSET(prcpf[3])}, + {"prcpf[4]", SXE_MAC_STATS_OFFSET(prcpf[4])}, + {"prcpf[5]", SXE_MAC_STATS_OFFSET(prcpf[5])}, + {"prcpf[6]", SXE_MAC_STATS_OFFSET(prcpf[6])}, + {"prcpf[7]", SXE_MAC_STATS_OFFSET(prcpf[7])}, + {"pfct[0]", SXE_MAC_STATS_OFFSET(pfct[0])}, + {"pfct[1]", SXE_MAC_STATS_OFFSET(pfct[1])}, + {"pfct[2]", SXE_MAC_STATS_OFFSET(pfct[2])}, + {"pfct[3]", SXE_MAC_STATS_OFFSET(pfct[3])}, + {"pfct[4]", SXE_MAC_STATS_OFFSET(pfct[4])}, + {"pfct[5]", SXE_MAC_STATS_OFFSET(pfct[5])}, + {"pfct[6]", SXE_MAC_STATS_OFFSET(pfct[6])}, + {"pfct[7]", SXE_MAC_STATS_OFFSET(pfct[7])}, +}; + +u32 sxe_mac_stats_regs_num_get(void) +{ + return ARRAY_SIZE(mac_stats); +} + +u32 sxe_self_test_suite_num_get(void) +{ + return sizeof(sxe_gstrings_test) / ETH_GSTRING_LEN; +} + +u32 sxe_stats_num_get(void) +{ + return ARRAY_SIZE(sxe_gstrings_stats); +} + +u32 sxe_dma_stats_num_get(void) +{ + return ARRAY_SIZE(sxe_gstrings_dma_stats); +} + +u32 sxe_priv_flags_num_get(void) +{ + return ARRAY_SIZE(sxe_priv_flags_strings); +} + +static void sxe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, SXE_DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, SXE_VERSION, + sizeof(drvinfo->version)); + + sxe_fw_version_get(adapter); + strlcpy(drvinfo->fw_version, (s8 *)adapter->fw_info.fw_version, + sizeof(drvinfo->fw_version)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = SXE_PRIV_FLAGS_STR_LEN; + + return; +} + +static int sxe_get_sset_count(struct net_device *netdev, int 
sset) +{ + int ret; + + switch (sset) { + case ETH_SS_TEST: + ret = SXE_TEST_GSTRING_ARRAY_SIZE; + break; + case ETH_SS_STATS: + ret = SXE_STATS_LEN; + break; + case ETH_SS_PRIV_FLAGS: + ret = SXE_PRIV_FLAGS_STR_LEN; + break; + default: + ret = -EOPNOTSUPP; + } + + LOG_DEBUG("type cmd=%d, string len=%d\n", sset, ret); + + return ret; +} + +static void sxe_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + s8 *p; + u32 i, j, start; + struct sxe_ring *ring; + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats; + struct sxe_adapter *adapter = netdev_priv(netdev); + + stats_lock(adapter); + sxe_stats_update(adapter); + net_stats = dev_get_stats(netdev, &temp); + for (i = 0; i < SXE_STATS_ARRAY_SIZE; i++) { + switch (sxe_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (s8 *) net_stats + + sxe_gstrings_stats[i].stat_offset; + break; + case SXE_STATS: + p = (s8 *) adapter + + sxe_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; + } + + data[i] = (sxe_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < netdev->num_tx_queues; j++) { + ring = adapter->tx_ring_ctxt.ring[j]; + if (!ring) { + data[i] = 0; + data[i+1] = 0; + i += 2; + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } + + for (j = 0; j < SXE_RX_RING_NUM; j++) { + ring = adapter->rx_ring_ctxt.ring[j]; + if (!ring) { + data[i] = 0; + data[i+1] = 0; + i += 2; + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } + + for (j = 0; j < SXE_PKG_BUF_NUM_MAX; j++) { + data[i++] = adapter->stats.hw.dburxtcin[j]; + data[i++] = adapter->stats.hw.dburxtcout[j]; + data[i++] = adapter->stats.hw.dburxdrofpcnt[j]; + data[i++] = adapter->stats.hw.dburxgdreecnt[j]; + } + + for (j = 0; j < SXE_PKG_BUF_NUM_MAX; j++) { + data[i++] = adapter->stats.hw.dbutxtcin[j]; + data[i++] = adapter->stats.hw.dbutxtcout[j]; + } + + for (j = 0; j < SXE_DCB_8_TC; j++) { + data[i++] = adapter->stats.hw.qptc[j]; + data[i++] = adapter->stats.hw.qprc[j]; + data[i++] = adapter->stats.hw.qbtc[j]; + data[i++] = adapter->stats.hw.qbrc[j]; + data[i++] = adapter->stats.hw.qprdc[j]; + } + + for (j = 0; j < sxe_dma_stats_num_get(); j++) { + p = (s8 *) adapter + sxe_gstrings_dma_stats[j].stat_offset; + data[i++] = *(u64 *)p; + } + + for (j = 0; j < SXE_PKG_BUF_NUM_MAX; j++) { + data[i++] = adapter->stats.hw.prcpf[j]; + data[i++] = adapter->stats.hw.pfct[j]; + } + stats_unlock(adapter); + + return; +} + +static void sxe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + u32 i; + char *string = (char *)data; + + switch (stringset) { + case ETH_SS_TEST: + for (i = 0; i < SXE_TEST_GSTRING_ARRAY_SIZE; i++) { + memcpy(string, sxe_gstrings_test[i], ETH_GSTRING_LEN); + string += ETH_GSTRING_LEN; + } + break; + case ETH_SS_STATS: + for (i = 0; i < SXE_STATS_ARRAY_SIZE; i++) { + memcpy(string, sxe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + string += ETH_GSTRING_LEN; + } + + for (i = 0; i < netdev->num_tx_queues; i++) { + sprintf(string, "tx_ring_%u_packets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "tx_ring_%u_bytes", i); + string += ETH_GSTRING_LEN; + } + for (i = 0; i < SXE_RX_RING_NUM; i++) { + 
sprintf(string, "rx_ring_%u_packets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rx_ring_%u_bytes", i); + string += ETH_GSTRING_LEN; + } + + for (i = 0; i < SXE_PKG_BUF_NUM_MAX; i++) { + sprintf(string, "rx_pkt_buf_%u_in_packets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rx_pkt_buf_%u_out_packets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rx_pkt_buf_%u_overflow_packets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "rx_pkt_buf_%u_ecc_errors", i); + string += ETH_GSTRING_LEN; + } + + for (i = 0; i < SXE_PKG_BUF_NUM_MAX; i++) { + sprintf(string, "tx_pkt_buf_%u_in_packets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "tx_pkt_buf_%u_out_packets", i); + string += ETH_GSTRING_LEN; + } + + for (i = 0; i < SXE_DCB_8_TC; i++) { + sprintf(string, "tc_%u_tx_ring_packets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "tc_%u_rx_ring_packets", i); + string += ETH_GSTRING_LEN; + sprintf(string, "tc_%u_tx_ring_bytes", i); + string += ETH_GSTRING_LEN; + sprintf(string, "tc_%u_rx_ring_bytes", i); + string += ETH_GSTRING_LEN; + sprintf(string, "tc_%u_rx_ring_dropped", i); + string += ETH_GSTRING_LEN; + } + + for (i = 0; i < sxe_dma_stats_num_get(); i++) { + memcpy(string, sxe_gstrings_dma_stats[i].stat_string, + ETH_GSTRING_LEN); + string += ETH_GSTRING_LEN; + } + + for (i = 0; i < SXE_PKG_BUF_NUM_MAX; i++) { + sprintf(string, "up_%u_pause_recv", i); + string += ETH_GSTRING_LEN; + sprintf(string, "up_%u_pause_send", i); + string += ETH_GSTRING_LEN; + } + + break; + case ETH_SS_PRIV_FLAGS: + memcpy(string, sxe_priv_flags_strings, + SXE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + default: + break; + } + + return ; +} + +static u32 sxe_get_priv_flags(struct net_device *netdev) +{ + u32 priv_flags = 0; +#if !defined HAVE_NO_SWIOTLB_SKIP_CPU_SYNC || \ + defined SXE_IPSEC_CONFIGURE + struct sxe_adapter *adapter = netdev_priv(netdev); +#endif + +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC + if (adapter->cap & SXE_RX_LEGACY) { + priv_flags |= SXE_PRIV_FLAGS_LEGACY_RX; + } +#endif + +#ifdef SXE_IPSEC_CONFIGURE + if (adapter->cap & SXE_VF_IPSEC_ENABLED) { + priv_flags |= SXE_PRIV_FLAGS_VF_IPSEC_EN; + } +#endif + return priv_flags; +} + +static int sxe_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + u32 cap = adapter->cap; + +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC + cap &= ~SXE_RX_LEGACY; + if (priv_flags & SXE_PRIV_FLAGS_LEGACY_RX) { + cap |= SXE_RX_LEGACY; + } +#endif + +#ifdef SXE_IPSEC_CONFIGURE + cap &= ~SXE_VF_IPSEC_ENABLED; + if (priv_flags & SXE_PRIV_FLAGS_VF_IPSEC_EN) { + cap |= SXE_VF_IPSEC_ENABLED; + } +#endif + + if (cap != adapter->cap) { + adapter->cap = cap; + + if (netif_running(netdev)) { + LOG_DEBUG_BDF("set priv flags reinit\n"); + sxe_hw_reinit(adapter); + } + } + + LOG_DEBUG_BDF("priv_flags=%u\n", priv_flags); + + return 0; +} + +s32 sxe_fnav_dest_queue_parse( + struct sxe_adapter *adapter, + u64 ring_cookie, + u8 *queue) +{ + s32 ret = 0; + + LOG_DEBUG_BDF("source ring_cookie = 0x%llx\n", ring_cookie); + if (ring_cookie == RX_CLS_FLOW_DISC) { + *queue = SXE_FNAV_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie); + LOG_DEBUG_BDF("input ring = %u, vf = %u \n", ring, vf); + + if (!vf && (ring >= adapter->rx_ring_ctxt.num)) { + LOG_ERROR_BDF("input ring[%u] exceed max rx ring num[%u]\n", + ring, adapter->rx_ring_ctxt.num); + ret = -EINVAL; + goto l_ret; + } else if (vf && + ((vf > adapter->vt_ctxt.num_vfs) 
|| + ring >= adapter->ring_f.ring_per_pool)) { + LOG_ERROR_BDF("input vf[%u] exceed max vf num[%u] or " + "ring[%u] exceed max rx ring num[%u] in pool\n", + vf, adapter->vt_ctxt.num_vfs, + ring, adapter->ring_f.ring_per_pool); + ret = -EINVAL; + goto l_ret; + } + + if (!vf) { + *queue = adapter->rx_ring_ctxt.ring[ring]->reg_idx; + } else { + *queue = ((vf - 1) * + adapter->ring_f.ring_per_pool ) + ring; + } + } + + LOG_DEBUG_BDF("parse fnav dest queue end, ring_cookie = 0x%llx, queue = %u\n", + ring_cookie, *queue); +l_ret: + return ret; +} + +static s32 sxe_fnav_rule_parse_to_flow( + struct sxe_adapter *adapter, + struct sxe_fnav_rule_node *rule, + struct ethtool_rx_flow_spec *flow) +{ + s32 ret = 0; + union sxe_fnav_rule_info *mask = &adapter->fnav_ctxt.rules_mask; + + switch (rule->rule_info.ntuple.flow_type) { + case SXE_SAMPLE_FLOW_TYPE_TCPV4: + flow->flow_type = TCP_V4_FLOW; + break; + case SXE_SAMPLE_FLOW_TYPE_UDPV4: + flow->flow_type = UDP_V4_FLOW; + break; + case SXE_SAMPLE_FLOW_TYPE_SCTPV4: + flow->flow_type = SCTP_V4_FLOW; + break; + case SXE_SAMPLE_FLOW_TYPE_IPV4: + flow->flow_type = IP_USER_FLOW; + flow->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + flow->h_u.usr_ip4_spec.proto = 0; + flow->m_u.usr_ip4_spec.proto = 0; + break; + default: + LOG_ERROR_BDF("unknow flow type[%u]\n", + rule->rule_info.ntuple.flow_type); + ret = -EINVAL; + goto l_ret; + } + flow->flow_type |= FLOW_EXT; + LOG_DEBUG_BDF("flow_type=0x%x\n", flow->flow_type); + + flow->h_u.tcp_ip4_spec.psrc = rule->rule_info.ntuple.src_port; + flow->h_u.tcp_ip4_spec.pdst = rule->rule_info.ntuple.dst_port; + flow->h_u.tcp_ip4_spec.ip4src = rule->rule_info.ntuple.src_ip[0]; + flow->h_u.tcp_ip4_spec.ip4dst = rule->rule_info.ntuple.dst_ip[0]; + flow->h_ext.vlan_tci = rule->rule_info.ntuple.vlan_id; + flow->h_ext.vlan_etype = rule->rule_info.ntuple.flex_bytes; + flow->h_ext.data[1] = htonl(rule->rule_info.ntuple.vm_pool); + LOG_DEBUG_BDF("parse rule to user src_port[%u], dst_port[%u], src_ip[0x%x], " + "dst_ip[0x%x] vlan_id[%u], flex_bytes[0x%x], vm_pool[%u]\n", + flow->h_u.tcp_ip4_spec.psrc, + flow->h_u.tcp_ip4_spec.pdst, + flow->h_u.tcp_ip4_spec.ip4src, + flow->h_u.tcp_ip4_spec.ip4dst, + flow->h_ext.vlan_tci, + flow->h_ext.vlan_etype, + flow->h_ext.data[1]); + + flow->m_u.tcp_ip4_spec.psrc = mask->ntuple.src_port; + flow->m_u.tcp_ip4_spec.pdst = mask->ntuple.dst_port; + flow->m_u.tcp_ip4_spec.ip4src = mask->ntuple.src_ip[0]; + flow->m_u.tcp_ip4_spec.ip4dst = mask->ntuple.dst_ip[0]; + flow->m_ext.vlan_tci = mask->ntuple.vlan_id; + flow->m_ext.vlan_etype = mask->ntuple.flex_bytes; + flow->m_ext.data[1] = htonl(mask->ntuple.vm_pool); + LOG_DEBUG_BDF("parse rule mask to user src_port[%u], dst_port[%u], src_ip[0x%x]," + " dst_ip[0x%x] vlan_id[%u], flex_bytes[0x%x], vm_pool[%u]\n", + flow->m_u.tcp_ip4_spec.psrc, + flow->m_u.tcp_ip4_spec.pdst, + flow->m_u.tcp_ip4_spec.ip4src, + flow->m_u.tcp_ip4_spec.ip4dst, + flow->m_ext.vlan_tci, + flow->m_ext.vlan_etype, + flow->m_ext.data[1]); + + flow->ring_cookie = rule->ring_cookie; + LOG_DEBUG_BDF("parse ring_cookie[%llu]\n", flow->ring_cookie); + +l_ret: + return ret; +} + +static int sxe_fnav_rule_get(struct sxe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + int ret; + struct ethtool_rx_flow_spec *flow = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct sxe_fnav_rule_node *rule = NULL; + + cmd->data = sxe_fnav_max_rule_num_get( + adapter->fnav_ctxt.rules_table_size); + LOG_DEBUG_BDF("fnav table size = %llu\n", cmd->data); + + LOG_DEBUG_BDF("find rule in loc[%u]\n", flow->location); 
+ rule = sxe_fnav_specific_rule_find(adapter, flow->location); + if (rule == NULL) { + LOG_ERROR_BDF("find in loc[%u] fnav rule failed\n", + flow->location); + ret = -EINVAL; + goto l_end; + } + + ret = sxe_fnav_rule_parse_to_flow(adapter, rule, flow); + if (ret) { + LOG_ERROR_BDF("parse fnav rule[%p] to user failed\n", rule); + goto l_end; + } + +l_end: + return ret; +} + +static int sxe_fnav_rule_locs_get( + struct sxe_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct hlist_node *next; + struct sxe_fnav_rule_node *rule; + int cnt = 0; + + cmd->data = sxe_fnav_max_rule_num_get( + adapter->fnav_ctxt.rules_table_size); + + hlist_for_each_entry_safe(rule, next, + &adapter->fnav_ctxt.rules_list, + node) { + if (cnt == cmd->rule_cnt) { + LOG_WARN_BDF("fnav rule count exceeds requested buffer, max_rule_cnt=%u\n", cnt); + return -EMSGSIZE; + } + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + LOG_DEBUG_BDF("get fnav rule count=%u, table size=%llu\n", + cmd->rule_cnt, cmd->data); + return 0; +} + +static int sxe_rss_hash_srcs_get(struct sxe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case UDP_V4_FLOW: + if (adapter->cap & SXE_RSS_FIELD_IPV4_UDP) { + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + } + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case UDP_V6_FLOW: + if (adapter->cap & SXE_RSS_FIELD_IPV6_UDP) { + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + } + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + LOG_ERROR_BDF("unknown cmd->flow_type=0x%x\n", cmd->flow_type); + return -EINVAL; + } + + LOG_DEBUG_BDF("cmd->flow_type[0x%x] get data[0x%llx]\n", + cmd->flow_type, cmd->data); + return 0; +} + +static int sxe_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + int ret = 0; + struct sxe_adapter *adapter = netdev_priv(netdev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->rx_ring_ctxt.num; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->fnav_ctxt.rule_cnt; + break; + case ETHTOOL_GRXCLSRULE: + ret = sxe_fnav_rule_get(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = sxe_fnav_rule_locs_get(adapter, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + ret = sxe_rss_hash_srcs_get(adapter, cmd); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static s32 sxe_fnav_flow_param_check(struct sxe_adapter *adapter, + struct ethtool_rx_flow_spec *flow) +{ + s32 ret = 0; + u64 max_rule_num; + + if (!(adapter->cap & SXE_FNAV_SPECIFIC_ENABLE)) { + ret = -EOPNOTSUPP; + LOG_ERROR_BDF("sxe not in specific fnav mode\n"); + goto l_ret; + } + + max_rule_num = sxe_fnav_max_rule_num_get( + adapter->fnav_ctxt.rules_table_size); + + if (flow->location >= max_rule_num) { + LOG_MSG_ERR(drv, "location[%u] out of range[%llu]\n", + flow->location, max_rule_num); + ret = -EINVAL; + } + +l_ret: + return ret; +} + +STATIC int sxe_fnav_flow_type_parse( + struct ethtool_rx_flow_spec *flow, + u8 *flow_type) +{ + s32 ret = 0; + + switch (flow->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + *flow_type = SXE_SAMPLE_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = SXE_SAMPLE_FLOW_TYPE_UDPV4; + break; + case
SCTP_V4_FLOW: + *flow_type = SXE_SAMPLE_FLOW_TYPE_SCTPV4; + break; + case IP_USER_FLOW: + switch (flow->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = SXE_SAMPLE_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = SXE_SAMPLE_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = SXE_SAMPLE_FLOW_TYPE_SCTPV4; + break; + case 0: + if (!flow->m_u.usr_ip4_spec.proto) { + *flow_type = SXE_SAMPLE_FLOW_TYPE_IPV4; + break; + } + LOG_WARN("pass through to default in proto[%u]\n", + flow->m_u.usr_ip4_spec.proto); + fallthrough; + default: + LOG_WARN("unknown IP_USER_FLOW proto[%u]\n", + flow->h_u.usr_ip4_spec.proto); + ret = -EINVAL; + } + break; + default: + LOG_WARN("unknown flow type[0x%x]\n", + flow->flow_type & ~FLOW_EXT); + ret = -EINVAL; + } + + LOG_DEBUG("parse flow type = 0x%x\n", *flow_type); + return ret; +} + +static s32 sxe_fnav_rule_parse_from_flow( + struct sxe_adapter *adapter, + struct sxe_fnav_rule_node *rule, + union sxe_fnav_rule_info *mask, + struct ethtool_rx_flow_spec *flow) +{ + s32 ret; + + rule->sw_idx = flow->location; + + ret = sxe_fnav_flow_type_parse(flow, + &rule->rule_info.ntuple.flow_type); + if (ret) { + LOG_MSG_ERR(drv, "unrecognized flow type:0x%x\n", + rule->rule_info.ntuple.flow_type); + ret = -EINVAL; + goto l_ret; + } + + mask->ntuple.flow_type = SXE_SAMPLE_L4TYPE_IPV6_MASK | + SXE_SAMPLE_L4TYPE_MASK; + + if (rule->rule_info.ntuple.flow_type == SXE_SAMPLE_FLOW_TYPE_IPV4) { + LOG_DEBUG_BDF("note: entry SXE_SAMPLE_FLOW_TYPE_IPV4\n"); + mask->ntuple.flow_type &= SXE_SAMPLE_L4TYPE_IPV6_MASK; + } + LOG_DEBUG_BDF("mask's flow_type=0x%x\n", mask->ntuple.flow_type); + + rule->rule_info.ntuple.src_ip[0] = flow->h_u.tcp_ip4_spec.ip4src; + rule->rule_info.ntuple.dst_ip[0] = flow->h_u.tcp_ip4_spec.ip4dst; + rule->rule_info.ntuple.src_port = flow->h_u.tcp_ip4_spec.psrc; + rule->rule_info.ntuple.dst_port = flow->h_u.tcp_ip4_spec.pdst; + + mask->ntuple.src_ip[0] = flow->m_u.tcp_ip4_spec.ip4src; + mask->ntuple.dst_ip[0] = flow->m_u.tcp_ip4_spec.ip4dst; + mask->ntuple.src_port = flow->m_u.tcp_ip4_spec.psrc; + mask->ntuple.dst_port = flow->m_u.tcp_ip4_spec.pdst; + + if (flow->flow_type & FLOW_EXT) { + rule->rule_info.ntuple.vm_pool = + (unsigned char)ntohl(flow->h_ext.data[1]); + rule->rule_info.ntuple.vlan_id = flow->h_ext.vlan_tci; + rule->rule_info.ntuple.flex_bytes = + flow->h_ext.vlan_etype; + + mask->ntuple.vm_pool = + (unsigned char)ntohl(flow->m_ext.data[1]); + mask->ntuple.vlan_id = flow->m_ext.vlan_tci; + mask->ntuple.flex_bytes = flow->m_ext.vlan_etype; + } + + rule->ring_cookie = flow->ring_cookie; + +l_ret: + return ret; +} + +static int sxe_fnav_specific_rule_add( + struct sxe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *flow = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct sxe_fnav_rule_node *rule; + union sxe_fnav_rule_info mask; + u8 queue; + s32 ret; + + ret = sxe_fnav_flow_param_check(adapter, flow); + if (ret) { + LOG_ERROR_BDF("fnav param check failed, ret=%d\n", ret); + goto l_err_ret; + } + + ret = sxe_fnav_dest_queue_parse(adapter, flow->ring_cookie, &queue); + if (ret) { + LOG_ERROR_BDF("get fnav destination queue failed, ret=%d\n", ret); + goto l_err_ret; + } + + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); + if (!rule) { + LOG_ERROR_BDF("malloc rule mem failed\n"); + ret = -ENOMEM; + goto l_err_ret; + } + + memset(&mask, 0, sizeof(union sxe_fnav_rule_info)); + + ret = sxe_fnav_rule_parse_from_flow(adapter, rule, &mask, flow); + if (ret) { + LOG_ERROR_BDF("get fnav rule info failed, 
ret=%d\n", ret); + goto l_err_free; + } + + ret = sxe_fnav_specific_rule_add_process(adapter, rule, &mask, queue); + if (ret) { + ret = -EINVAL; + LOG_ERROR_BDF("add fnav rule failed, ret=%d\n", ret); + goto l_err_free; + } + + return ret; + +l_err_free: + kfree(rule); + rule = NULL; +l_err_ret: + return ret; +} + +static int sxe_fnav_specific_rule_del( + struct sxe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *user_flow = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int ret; + + spin_lock(&adapter->fnav_ctxt.specific_lock); + ret = sxe_fnav_sw_specific_rule_del(adapter, user_flow->location); + spin_unlock(&adapter->fnav_ctxt.specific_lock); + + return ret; +} + +static int sxe_rss_hash_srcs_update(struct ethtool_rxnfc *nfc, u32 *cap) +{ + int ret = -EINVAL; + + LOG_INFO("user input nfc->data = 0x%llx, nfc->flow_type=0x%x \n", + nfc->data, nfc->flow_type); + + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + LOG_ERROR("data[0x%llx] contains unsupported hash fields\n", nfc->data); + goto l_err_ret; + } + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) { + LOG_ERROR("data[0x%llx] in flow_type[0x%x] does not " + "contain the required hash fields\n", + nfc->data, nfc->flow_type); + goto l_err_ret; + } + + break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) { + LOG_ERROR("data[0x%llx] in flow_type[UDP_V4_FLOW] does not " + "contain the required hash fields\n", + nfc->data); + goto l_err_ret; + } + + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + *cap &= ~SXE_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + *cap |= SXE_RSS_FIELD_IPV4_UDP; + break; + default: + goto l_err_ret; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) { + + LOG_ERROR("data[0x%llx] in flow_type[UDP_V6_FLOW] does not " + "contain the required hash fields\n", + nfc->data); + goto l_err_ret; + } + + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + *cap &= ~SXE_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + *cap |= SXE_RSS_FIELD_IPV6_UDP; + break; + default: + goto l_err_ret; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) { + goto l_err_ret; + } + + break; + default: + goto l_err_ret; + } + LOG_ERROR("cap=0x%x\n", *cap); + return 0; + +l_err_ret: + return ret; +} + +static int sxe_rss_hash_srcs_set(struct sxe_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 cap = adapter->cap; + u32 version = 0; + int ret; + + ret = sxe_rss_hash_srcs_update(nfc, &cap); + if (ret) { + LOG_ERROR_BDF("rss hash srcs update failed\n"); + goto l_ret; + } + + if (cap != adapter->cap) { + struct sxe_hw *hw = &adapter->hw; + if ((cap & UDP_RSS_FLAGS) && + !(adapter->cap & UDP_RSS_FLAGS)) { + LOG_MSG_WARN(drv, "enabling udp rss: fragmented packets may " + "arrive out of order to the stack above\n"); + } + + adapter->cap = cap; + if (cap & SXE_RSS_FIELD_IPV4_UDP) { + version = SXE_RSS_IP_VER_4; + } else if (cap & SXE_RSS_FIELD_IPV6_UDP) { + version = SXE_RSS_IP_VER_6; + } + LOG_DEBUG_BDF("cap=[0x%x], version=%u\n", cap, version); + 
hw->dbu.ops->rss_hash_pkt_type_update(hw, version); + } + +l_ret: + return ret; +} + +static int sxe_set_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + int ret = -EOPNOTSUPP; + struct sxe_adapter *adapter = netdev_priv(netdev); + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = sxe_fnav_specific_rule_add(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = sxe_fnav_specific_rule_del(adapter, cmd); + break; + case ETHTOOL_SRXFH: + ret = sxe_rss_hash_srcs_set(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +static u32 sxe_get_rxfh_key_size(struct net_device *netdev) +{ + return SXE_RSS_KEY_SIZE; +} + +static u32 sxe_rss_indir_size(struct net_device *netdev) +{ + return sxe_rss_redir_tbl_size_get(); +} + +static void sxe_rss_redir_tbl_get(struct sxe_adapter *adapter, u32 *indir) +{ + u32 i; + u32 tbl_size = sxe_rss_redir_tbl_size_get(); + u16 rss_m = sxe_rss_mask_get(adapter); + + if (adapter->cap & SXE_SRIOV_ENABLE) { + rss_m = sxe_rss_num_get(adapter) - 1; + } + + for (i = 0; i < tbl_size; i++) { + indir[i] = adapter->rss_indir_tbl[i] & rss_m; + } + + return ; +} + +static int sxe_get_rxfh(struct net_device *netdev, + u32 *indir, u8 *key, u8 *hfunc) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + if (hfunc) { + *hfunc = ETH_RSS_HASH_TOP; + } + + if (indir) { + sxe_rss_redir_tbl_get(adapter, indir); + } + + if (key) { + memcpy(key, adapter->rss_key, sxe_get_rxfh_key_size(netdev)); + } + + return 0; +} + +static int sxe_set_rxfh(struct net_device *netdev, const u32 *redir, + const u8 *key, const u8 hfunc) +{ + u16 i, max_queues; + struct sxe_adapter *adapter = netdev_priv(netdev); + u16 rss = sxe_rss_num_get(adapter); + u32 tbl_entries = sxe_rss_redir_tbl_size_get(); + struct sxe_hw *hw = &adapter->hw; + + LOG_DEBUG_BDF("rss=%u, tbl_entries=%u\n", rss, tbl_entries); + if (hfunc) { + LOG_ERROR_BDF("sxe unsupport hfunc[%d]\n", hfunc); + return -EINVAL; + } + + if (redir) { + max_queues = min_t(int, + adapter->rx_ring_ctxt.num, rss); + + if ((adapter->cap & SXE_SRIOV_ENABLE) && (max_queues < 2)) { + max_queues = 2; + } + + for (i = 0; i < tbl_entries; i++) { + if (redir[i] >= max_queues) { + LOG_ERROR_BDF("indir[%u]=%u > max_que=%u\n", + i, redir[i], max_queues); + return -EINVAL; + } + } + + for (i = 0; i < tbl_entries; i++) { + adapter->rss_indir_tbl[i] = redir[i]; + } + + hw->dbu.ops->rss_redir_tbl_set_all(hw, adapter->rss_indir_tbl); + } + + if (key) { + memcpy(adapter->rss_key, key, sxe_get_rxfh_key_size(netdev)); + hw->dbu.ops->rss_key_set_all(hw, adapter->rss_key); + } + + return 0; +} + +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +static void +sxe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *kernel_ring, + struct netlink_ext_ack __always_unused *extack) +#else +static void sxe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = SXE_DESC_CNT_MAX; + ring->tx_max_pending = SXE_DESC_CNT_MAX; + ring->rx_pending = adapter->rx_ring_ctxt.ring[0]->depth; + ring->tx_pending = adapter->tx_ring_ctxt.ring[0]->depth; + + return; +} + +inline static bool sxe_ringparam_changed(struct sxe_adapter *adapter, + struct ethtool_ringparam *ring, + u32 *tx_cnt, u32 *rx_cnt) +{ + bool changed = true; + + *tx_cnt = clamp_t(u32, ring->tx_pending, + SXE_DESC_CNT_MIN, SXE_DESC_CNT_MAX); + *tx_cnt = ALIGN(*tx_cnt, SXE_REQ_DESCRIPTOR_MULTIPLE); + + *rx_cnt = 
clamp_t(u32, ring->rx_pending, + SXE_DESC_CNT_MIN, SXE_DESC_CNT_MAX); + *rx_cnt = ALIGN(*rx_cnt, SXE_REQ_DESCRIPTOR_MULTIPLE); + + if ((*tx_cnt == adapter->tx_ring_ctxt.depth) && + (*rx_cnt == adapter->rx_ring_ctxt.depth)) { + changed = false; + } + + return changed; +} + +inline static void sxe_ring_depth_set(struct sxe_adapter *adapter, + u32 tx_cnt, u32 rx_cnt) +{ + u32 i; + struct sxe_ring **tx_ring = adapter->tx_ring_ctxt.ring; + struct sxe_ring **rx_ring = adapter->rx_ring_ctxt.ring; + struct sxe_ring **xdp_ring = adapter->xdp_ring_ctxt.ring; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + tx_ring[i]->depth = tx_cnt; + } + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + xdp_ring[i]->depth = tx_cnt; + } + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + rx_ring[i]->depth = rx_cnt; + } + adapter->tx_ring_ctxt.depth = tx_cnt; + adapter->xdp_ring_ctxt.depth = tx_cnt; + adapter->rx_ring_ctxt.depth = rx_cnt; + + return; +} + +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +static int +sxe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *user_param, + struct kernel_ethtool_ringparam __always_unused *kernel_ring, + struct netlink_ext_ack __always_unused *extack) +#else +static int sxe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *user_param) +#endif +{ + int ret = 0; + u32 new_rx_count, new_tx_count; + struct sxe_adapter *adapter = netdev_priv(netdev); + + if ((user_param->rx_mini_pending) || (user_param->rx_jumbo_pending)) { + LOG_ERROR_BDF("dont support set rx_mini_pending=%u or rx_jumbo_pending=%u\n", + user_param->rx_mini_pending, user_param->rx_jumbo_pending); + ret = -EINVAL; + goto l_end; + } + + if (!sxe_ringparam_changed(adapter, user_param, + &new_tx_count, &new_rx_count)) { + LOG_DEBUG_BDF("ring depth dont change, tx_depth=%u, rx_depth=%u\n", + new_tx_count, new_rx_count); + goto l_end; + } + + while (test_and_set_bit(SXE_RESETTING, &adapter->state)) { + usleep_range(SXE_NIC_RESET_WAIT_MIN, SXE_NIC_RESET_WAIT_MAX); + } + + if (!netif_running(adapter->netdev)) { + sxe_ring_depth_set(adapter, new_tx_count, new_rx_count); + goto l_clear; + } + + sxe_down(adapter); + + if (new_tx_count != adapter->tx_ring_ctxt.depth) { + ret = sxe_tx_ring_depth_reset(adapter, new_tx_count); + if (ret < 0) { + goto l_up; + } + } + + if (new_rx_count != adapter->rx_ring_ctxt.depth) { + ret = sxe_rx_ring_depth_reset(adapter, new_rx_count); + } +l_up: + sxe_up(adapter); +l_clear: + clear_bit(SXE_RESETTING, &adapter->state); +l_end: + return ret; +} + +static int sxe_nway_reset(struct net_device *netdev) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + LOG_DEBUG_BDF("ethtool reset\n"); + sxe_hw_reinit(adapter); + } + + return 0; +} + +static int sxe_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_ctxt.ptp_clock) { + info->phc_index = ptp_clock_index(adapter->ptp_ctxt.ptp_clock); + } else { + info->phc_index = -1; + } + + info->tx_types = + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + return 0; +} + +static u32 
sxe_max_channels(struct sxe_adapter *adapter) +{ + u32 max_combined; + u8 tcs = sxe_dcb_tc_get(adapter); + + if (!(adapter->cap & SXE_MSIX_ENABLED)) { + max_combined = 1; + } else if (adapter->cap & SXE_SRIOV_ENABLE) { + max_combined = sxe_rss_mask_get(adapter) + 1; + } else if (tcs > SXE_DCB_1_TC) { + if (tcs > SXE_DCB_4_TC) { + max_combined = SXE_8_RING_PER_TC; + } else { + max_combined = SXE_16_RING_PER_TC; + } + } else if (adapter->fnav_ctxt.sample_rate) { + max_combined = SXE_FNAV_RING_NUM_MAX; + } else { + max_combined = SXE_RSS_RING_NUM_MAX; + } + + return min_t(int, max_combined, num_online_cpus()); +} + +static void sxe_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + ch->max_combined = sxe_max_channels(adapter); + + if (adapter->cap & SXE_MSIX_ENABLED) { + ch->max_other = SXE_EVENT_IRQ_NUM; + ch->other_count = SXE_EVENT_IRQ_NUM; + } + + ch->combined_count = sxe_rss_num_get(adapter); + + if ((1 == ch->combined_count) || + (adapter->cap & SXE_SRIOV_ENABLE) || + (sxe_dcb_tc_get(adapter) > 1)) { + LOG_WARN_BDF("current combined count=%u, adapter cap=%x, " + "tcs=%u, sample_rate=%u, dont support fnav\n", + ch->combined_count, adapter->cap, + sxe_dcb_tc_get(adapter), + adapter->fnav_ctxt.sample_rate); + goto l_end; + } + + ch->combined_count = adapter->ring_f.fnav_num; + +l_end: + return; +} + +static int sxe_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + s32 ret; + struct sxe_adapter *adapter = netdev_priv(netdev); + u32 count = ch->combined_count; + + LOG_DEBUG_BDF("user param: cmd=%u, combined=%u, max_combined=%u, " + "max_other=%u, max_rx=%u, max_tx=%u, other_cnt=%u, " + "rx_cnt=%u, tx_cnt=%u\n", + ch->cmd, ch->combined_count, ch->max_combined, + ch->max_other, ch->max_rx, ch->max_tx, ch->other_count, + ch->rx_count, ch->tx_count); + + if (!count || ch->rx_count || ch->tx_count || + (ch->other_count != SXE_EVENT_IRQ_NUM) || + (count > sxe_max_channels(adapter))) { + LOG_ERROR_BDF("ethtool set channel failed, combined count=%u, " + "rx_count=%u, tx_count=%u, other_count=%u, max_ch=%u\n", + count, ch->rx_count, ch->tx_count, ch->other_count, + sxe_max_channels(adapter)); + ret = -EINVAL; + goto l_err; + } + + adapter->ring_f.fnav_limit = count; + + adapter->ring_f.rss_limit = (count > SXE_RSS_RING_NUM_MAX) ? 
\ + SXE_RSS_RING_NUM_MAX : count; + + ret = sxe_ring_reassign(adapter, sxe_dcb_tc_get(adapter)); + if (ret) { + LOG_ERROR_BDF("sxe_ring_reassign failed, err=%d, combined count=%u, " + "rx_count=%u, tx_count=%u, other_count=%u, max_ch=%u" + "tc=%u\n", + ret, count, ch->rx_count, ch->tx_count, ch->other_count, + sxe_max_channels(adapter), sxe_dcb_tc_get(adapter)); + } + +l_err: + return ret; +} + +static int sxe_get_link_ksettings_proto(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + u32 supported; + u32 advertising; + u32 speed_supported; + bool autoneg_supported; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + + adapter->phy_ctxt.ops->get_link_capabilities(adapter, + &speed_supported, &autoneg_supported); + + if (speed_supported & SXE_LINK_SPEED_10GB_FULL) { + supported |= SUPPORTED_10000baseKR_Full; + } + if (speed_supported & SXE_LINK_SPEED_1GB_FULL) { + supported |= SUPPORTED_1000baseKX_Full; + } + + if (adapter->phy_ctxt.autoneg_advertised) { + advertising = 0; + if (adapter->phy_ctxt.autoneg_advertised & SXE_LINK_SPEED_10GB_FULL) { + advertising |= SUPPORTED_10000baseKR_Full; + } + if (adapter->phy_ctxt.autoneg_advertised & SXE_LINK_SPEED_1GB_FULL) { + advertising |= SUPPORTED_1000baseKX_Full; + } + } else { + advertising = supported; + } + + if (autoneg_supported) { + supported |= SUPPORTED_Autoneg; + advertising |= ADVERTISED_Autoneg; + cmd->base.autoneg = AUTONEG_ENABLE; + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + + if (adapter->phy_ctxt.is_sfp) { + switch(adapter->phy_ctxt.sfp_info.type) { + case SXE_SFP_TYPE_DA_CU: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_DA; + break; + case SXE_SFP_TYPE_SRLR: + case SXE_SFP_TYPE_1G_SXLX: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_FIBRE; + break; + case SXE_SFP_TYPE_1G_CU: + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; + break; + case SXE_SFP_TYPE_NOT_PRESENT: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_NONE; + break; + default: + supported |= SUPPORTED_FIBRE; + advertising |= ADVERTISED_FIBRE; + cmd->base.port = PORT_OTHER; + break; + } + } else { + supported |= SUPPORTED_TP; + advertising |= ADVERTISED_TP; + cmd->base.port = PORT_TP; + } + + supported |= SUPPORTED_Pause; + + switch (hw->fc.requested_mode) { + case SXE_FC_FULL: + advertising |= ADVERTISED_Pause; + break; + case SXE_FC_RX_PAUSE: + advertising |= ADVERTISED_Pause | + ADVERTISED_Asym_Pause; + break; + case SXE_FC_TX_PAUSE: + advertising |= ADVERTISED_Asym_Pause; + break; + default: + advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + } + + if (netif_carrier_ok(netdev)) { + switch (adapter->link.speed) { + case SXE_LINK_SPEED_10GB_FULL: + cmd->base.speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; + break; + case SXE_LINK_SPEED_1GB_FULL: + cmd->base.speed = SPEED_1000; + cmd->base.duplex = DUPLEX_FULL; + break; + case SXE_LINK_SPEED_100_FULL: + cmd->base.speed = SPEED_100; + cmd->base.duplex = DUPLEX_FULL; + break; + case SXE_LINK_SPEED_10_FULL: + cmd->base.speed = SPEED_10; + cmd->base.duplex = DUPLEX_FULL; + break; + default: + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + break; + } + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + 
LOG_DEBUG_BDF("ethtool get link, speed=%x, is_up=%d, base.speed=%u\n", + adapter->link.speed, adapter->link.is_up, cmd->base.speed); + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + + return 0; +} + +static int sxe_set_link_ksettings_proto(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + int ret = 0; + u32 advertised, old; + u32 supported, advertising; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_phy_context *phy_ctxt = &adapter->phy_ctxt; + + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + LOG_DEBUG_BDF("multispeed sfp=%d, advertising=%x, supported=%x, " + "cmd autoneg=%x\n", + phy_ctxt->sfp_info.multispeed_fiber, + advertising, + supported, + cmd->base.autoneg); + + if (phy_ctxt->sfp_info.multispeed_fiber) { + if (advertising & ~supported) { + LOG_ERROR_BDF("(advertising & ~supported) > 0 failed," + "advertising=%x, supported=%x\n", advertising, supported); + ret = -EINVAL; + goto l_end; + } + + if (!cmd->base.autoneg && phy_ctxt->sfp_info.multispeed_fiber) { + if (advertising == (ADVERTISED_10000baseKR_Full | + ADVERTISED_1000baseKX_Full)) { + ret = -EINVAL; + goto l_end; + } + } + + old = phy_ctxt->autoneg_advertised; + advertised = 0; + if (advertising & ADVERTISED_10000baseKR_Full) { + advertised |= SXE_LINK_SPEED_10GB_FULL; + } + + if (advertising & ADVERTISED_1000baseKX_Full) { + advertised |= SXE_LINK_SPEED_1GB_FULL; + } + + if (old == advertised) { + ret = 0; + goto l_end; + } + + set_bit(SXE_SFP_MULTI_SPEED_SETTING, &adapter->state); + adapter->link.sfp_multispeed_time = jiffies; + while (test_and_set_bit(SXE_IN_SFP_INIT, &adapter->state)) { + usleep_range(SXE_SFP_INIT_WAIT_ITR_MIN, SXE_SFP_INIT_WAIT_ITR_MAX); + } + + smp_wmb(); + clear_bit(SXE_LINK_NEED_CONFIG, &(adapter->monitor_ctxt.state)); + set_bit(SXE_LINK_SPEED_CHANGE, &(adapter->monitor_ctxt.state)); + adapter->hw.mac.auto_restart = true; + LOG_INFO_BDF("set auto_restart true.\n"); + + ret = sxe_link_configure(adapter, advertised); + if (ret) { + LOG_MSG_INFO(probe, "setup link failed, ret = %d, advertised=%d\n", + ret, advertised); + sxe_link_configure(adapter, old); + } + clear_bit(SXE_IN_SFP_INIT, &adapter->state); + + } else { + return -EPERM; + } + +l_end: + return ret; +} + +static void sxe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + + u32 current_mode = hw->mac.ops->fc_requested_mode_get(hw); + + if (sxe_device_supports_autoneg_fc(hw) && + !hw->mac.ops->is_fc_autoneg_disabled(hw)) { + pause->autoneg = 1; + } else { + pause->autoneg = 0; + } + + if (current_mode == SXE_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (current_mode == SXE_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (current_mode == SXE_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } + + LOG_DEBUG_BDF("flow control current mode, " + "autoneg = %u, rx_pause = %u, tx_pause = %u", + pause->autoneg, pause->rx_pause, pause->tx_pause); + + return; +} + +static int sxe_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + int ret = 0; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + + bool old_autoneg_status = 
hw->mac.ops->is_fc_autoneg_disabled(hw); + bool new_autoneg_status; + enum sxe_fc_mode old_requested_mode = hw->mac.ops->fc_requested_mode_get(hw); + enum sxe_fc_mode new_requested_mode; + + if ((pause->autoneg == AUTONEG_ENABLE) && + !sxe_device_supports_autoneg_fc(hw)) { + LOG_ERROR_BDF("netdev[%p] does not support autoneg\n", netdev); + ret = -EINVAL; + goto l_ret; + } + + new_autoneg_status = (pause->autoneg != AUTONEG_ENABLE); + + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) { + new_requested_mode = SXE_FC_FULL; + } else if (pause->rx_pause && !pause->tx_pause) { + new_requested_mode = SXE_FC_RX_PAUSE; + } else if (!pause->rx_pause && pause->tx_pause) { + new_requested_mode = SXE_FC_TX_PAUSE; + } else { + new_requested_mode = SXE_FC_NONE; + } + + LOG_ERROR_BDF("netdev[%p] user set new_disable_fc_autoneg = %s, " + "new_requested_mode = %u, old_disable_fc_autoneg = %s," + "old_requested_mode = %u\n", netdev, + new_autoneg_status ? "yes" : "no", new_requested_mode, + old_autoneg_status ? "yes" : "no", old_requested_mode); + + if (old_autoneg_status != new_autoneg_status || + old_requested_mode != new_requested_mode) { + hw->mac.ops->fc_autoneg_disable_set(hw, new_autoneg_status); + hw->mac.ops->fc_requested_mode_set(hw, new_requested_mode); + + if (netif_running(netdev)) { + sxe_hw_reinit(adapter); + } else { + sxe_reset(adapter); + } + } + +l_ret: + return ret; +} + +#ifdef SXE_WOL_CONFIGURE + +bool sxe_is_wol_supported(struct sxe_adapter *adapter) +{ + return true; +} + +static s32 sxe_wol_cap_check(struct sxe_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + s32 ret = 0; + + if (!sxe_is_wol_supported(adapter)) { + ret = -SXE_ERR_DEVICE_NOT_SUPPORTED; + wol->supported = 0; + } + + return ret; +} + +#endif + +static void sxe_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ +#ifdef SXE_WOL_CONFIGURE + + struct sxe_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + if (sxe_wol_cap_check(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) { + goto l_ret; + } + + if (adapter->wol & SXE_WUFC_EX) { + wol->wolopts |= WAKE_UCAST; + } + + if (adapter->wol & SXE_WUFC_MC) { + wol->wolopts |= WAKE_MCAST; + } + + if (adapter->wol & SXE_WUFC_BC) { + wol->wolopts |= WAKE_BCAST; + } + + if (adapter->wol & SXE_WUFC_MAG) { + wol->wolopts |= WAKE_MAGIC; + } + + if (adapter->wol & SXE_WUFC_LNKC) { + wol->wolopts |= WAKE_PHY; + } + +l_ret: + return; +#else + wol->supported = 0; + wol->wolopts = 0; + return; + +#endif +} + +#ifdef SXE_WOL_CONFIGURE + +s32 sxe_fw_wol_set(struct sxe_adapter *adapter, u32 enable) +{ + return 0; +} + +#endif + +static int sxe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ +#ifdef SXE_WOL_CONFIGURE + struct sxe_adapter *adapter = netdev_priv(netdev); + u32 wol_old; + int ret = 0; + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) { + LOG_ERROR_BDF("sxe's not support wol mode[%u]\n", wol->wolopts); + ret = -EOPNOTSUPP; + goto l_ret; + } + + if (sxe_wol_cap_check(adapter, wol)) { + ret = wol->wolopts ? 
-EOPNOTSUPP : 0; + goto l_ret; + } + + wol_old = adapter->wol; + adapter->wol = 0; + + if (wol->wolopts & WAKE_PHY) { + adapter->wol |= SXE_WUFC_LNKC; + } + + if (wol->wolopts & WAKE_UCAST) { + adapter->wol |= SXE_WUFC_EX; + } + + if (wol->wolopts & WAKE_MCAST) { + adapter->wol |= SXE_WUFC_MC; + } + + if (wol->wolopts & WAKE_BCAST) { + adapter->wol |= SXE_WUFC_BC; + } + + if (wol->wolopts & WAKE_MAGIC) { + adapter->wol |= SXE_WUFC_MAG; + } + + LOG_DEBUG_BDF("old wol config:0x%x, new wol config:0x%x\n", + wol_old, adapter->wol); + if (adapter->wol) { + if (!wol_old) { + sxe_fw_wol_set(adapter, 1); + } + } else { + if (wol_old) { + sxe_fw_wol_set(adapter, 0); + } + } + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + +l_ret: + return ret; +#else + return -EOPNOTSUPP; + +#endif +} + +static u32 sxe_get_msglevel(struct net_device *netdev) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void sxe_set_msglevel(struct net_device *netdev, u32 data) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; + + return; +} + +STATIC irqreturn_t sxe_irq_test_handler(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *) data; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + + adapter->test_ctxt.icr |= hw->irq.ops->pending_irq_read_clear(&adapter->hw); + LOG_INFO_BDF("irq test : in irq handler eicr=%x\n", adapter->test_ctxt.icr); + + return IRQ_HANDLED; +} + +static s32 sxe_irq_test(struct sxe_adapter *adapter) +{ + bool shared_int = true; + s32 ret = SXE_DIAG_TEST_PASSED; + struct sxe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 irq = adapter->pdev->irq; + + if (adapter->irq_ctxt.msix_entries) { + goto l_end; + } else if (adapter->cap & SXE_MSI_ENABLED) { + shared_int = false; + LOG_INFO_BDF("test irq: msi mode\n"); + if (request_irq(irq, sxe_irq_test_handler, 0, + netdev->name, netdev)) { + ret = -SXE_DIAG_TEST_BLOCKED; + goto l_end; + } + } else if (!request_irq(irq, sxe_irq_test_handler, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; + LOG_INFO_BDF("test irq: intx mode, type:probe shared\n"); + } else if (request_irq(irq, sxe_irq_test_handler, IRQF_SHARED, + netdev->name, netdev)) { + ret = -SXE_DIAG_TEST_BLOCKED; + goto l_end; + } + LOG_MSG_INFO(hw, "testing %s interrupt\n", shared_int ? 
"shared" : "unshared"); + + ret = hw->irq.ops->irq_test(hw, &adapter->test_ctxt.icr, shared_int); + if (ret) { + LOG_ERROR_BDF("testing unshared irq failed\n"); + } + + free_irq(irq, netdev); + +l_end: + return ret; +} + +int sxe_reg_test(struct sxe_adapter *adapter) +{ + s32 ret; + struct sxe_hw *hw = &adapter->hw; + + if (sxe_is_hw_fault(hw)) { + LOG_MSG_ERR(drv, "nic hw fault - register test blocked\n"); + ret = -SXE_DIAG_TEST_BLOCKED; + goto l_end; + } + + ret = hw->setup.ops->regs_test(hw); + if (ret) { + LOG_ERROR_BDF("register test failed\n"); + goto l_end; + } + +l_end: + return ret; +} + +STATIC s32 sxe_link_test(struct sxe_adapter *adapter) +{ + s32 ret; + struct sxe_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed; + + if (sxe_is_hw_fault(hw)) { + ret = -SXE_DIAG_TEST_BLOCKED; + goto l_end; + } + + sxe_link_info_get(adapter, &link_speed, &link_up); + if (!link_up) { + ret = -SXE_DIAG_TEST_BLOCKED; + } else { + ret = SXE_DIAG_TEST_PASSED; + } + +l_end: + return ret; +} + +static void sxe_rx_buffer_clean (struct sxe_ring *ring, \ + struct sxe_rx_buffer *rx_buffer) +{ + struct sk_buff *skb; +#ifdef HAVE_DMA_ATTRS_STRUCT + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + if (!rx_buffer) { + return; + } + + if (rx_buffer->skb) { + skb = rx_buffer->skb; + if (SXE_CTRL_BUFFER(skb)->page_released) { + dma_unmap_page_attrs(ring->dev, + SXE_CTRL_BUFFER(skb)->dma, + sxe_rx_pg_size(ring), + DMA_FROM_DEVICE, +#ifdef HAVE_DMA_ATTRS_STRUCT + &attrs); +#else + SXE_RX_DMA_ATTR); +#endif + } + dev_kfree_skb(skb); + } + + if (rx_buffer->page) { + dma_sync_single_range_for_cpu(ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + sxe_rx_bufsz(ring), + DMA_FROM_DEVICE); + + dma_unmap_page_attrs(ring->dev, rx_buffer->dma, + sxe_rx_pg_size(ring), + DMA_FROM_DEVICE, +#ifdef HAVE_DMA_ATTRS_STRUCT + &attrs); +#else + SXE_RX_DMA_ATTR); +#endif + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } +} + +static void sxe_test_ring_free(struct sxe_adapter *adapter) +{ + struct sxe_ring *tx_ring; + struct sxe_ring *rx_ring; + struct sxe_rx_buffer *rx_buffer; + struct sxe_rx_buffer *rx_buffer_info; + u16 nta; + sxe_hw_rx_disable(adapter); + sxe_hw_tx_disable(adapter); + + sxe_reset(adapter); + + tx_ring = &adapter->test_ctxt.tx_ring; + rx_ring = &adapter->test_ctxt.rx_ring; + sxe_tx_ring_free(tx_ring); + rx_buffer_info = rx_ring->rx_buffer_info; + if (rx_buffer_info) { + nta = rx_ring->next_to_alloc; + rx_buffer = &rx_buffer_info[nta]; + sxe_rx_buffer_clean(rx_ring, rx_buffer); + } + sxe_rx_ring_free(rx_ring); + + return; +} + +STATIC s32 sxe_test_ring_configure(struct sxe_adapter *adapter) +{ + s32 ret; + struct sxe_hw *hw = &adapter->hw; + struct sxe_ring *tx_ring = &adapter->test_ctxt.tx_ring; + struct sxe_ring *rx_ring = &adapter->test_ctxt.rx_ring; + + ret = sxe_test_tx_configure(adapter, tx_ring); + if (ret) { + ret = -SXE_DIAG_TX_RING_CONFIGURE_ERR; + LOG_ERROR_BDF("test tx ring config failed, ret=%d\n", ret); + goto l_end; + } + + ret = sxe_test_rx_configure(adapter, rx_ring); + if (ret) { + ret = -SXE_DIAG_RX_RING_CONFIGURE_ERR; + LOG_ERROR_BDF("test rx ring config failed, ret=%d\n", ret); + goto err_nomem; + } + + hw->mac.ops->txrx_enable(hw); + + return 0; + +err_nomem: + sxe_test_ring_free(adapter); +l_end: + return ret; +} + +#define SXE_DEFAULT_MTU 1500 + +static s32 sxe_loopback_pcs_init(struct sxe_adapter *adapter, sxe_pcs_mode_e mode, + u32 max_frame) +{ + s32 ret; + sxe_pcs_cfg_s 
pcs_cfg; + struct sxe_driver_cmd cmd; + struct sxe_hw *hw = &adapter->hw; + + pcs_cfg.mode = mode; + pcs_cfg.mtu = max_frame; + + cmd.req = &pcs_cfg; + cmd.req_len = sizeof(pcs_cfg); + cmd.resp = NULL; + cmd.resp_len = 0; + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_PCS_SDS_INIT; + cmd.is_interruptible = true; + ret = sxe_driver_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:pcs init\n", ret); + goto l_end; + } + + sxe_fc_mac_addr_set(adapter); + + LOG_INFO_BDF("mode:%u loopback pcs init done.\n", mode); + +l_end: + return ret; +} + +static void sxe_loopback_test_setup(struct sxe_adapter *adapter) +{ + s32 ret; + u32 max_frame = SXE_DEFAULT_MTU + ETH_FRAME_LEN + ETH_FCS_LEN; + + (void)sxe_sfp_tx_laser_disable(adapter); + + ret = sxe_loopback_pcs_init(adapter, SXE_PCS_MODE_10GBASE_KR_WO, max_frame); + if (ret) { + LOG_ERROR_BDF("pcs sds init failed, mode=%d, ret=%d\n", + SXE_PCS_MODE_10GBASE_KR_WO, ret); + } + + ret = sxe_loopback_pcs_init(adapter, SXE_PCS_MODE_LPBK_PHY_TX2RX, + max_frame); + if (ret) { + LOG_ERROR_BDF("pcs sds init failed, mode=%d, ret=%d\n", + SXE_PCS_MODE_LPBK_PHY_TX2RX, ret); + } + + usleep_range(SXE_LPBK_TX_DISB_WAIT_MIN, SXE_LPBK_TX_DISB_WAIT_MAX); + + return; +} + +static void sxe_loopback_frame_create(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size >>= 1; + memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); + skb->data[frame_size + 10] = 0xBE; + skb->data[frame_size + 12] = 0xAF; + return; +} + +STATIC bool sxe_loopback_frame_check(struct sxe_rx_buffer *rx_buffer, + unsigned int frame_size) +{ + u8 *data; + bool match = true; + + data = kmap(rx_buffer->page) + rx_buffer->page_offset; + + frame_size >>= 1; + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) { + match = false; + } + + kunmap(rx_buffer->page); + + return match; +} + +static u16 sxe_test_ring_clean(struct sxe_ring *rx_ring, + struct sxe_ring *tx_ring, + u32 size) +{ + u16 rx_ntc, tx_ntc, count = 0; + union sxe_tx_data_desc *tx_desc; + union sxe_rx_data_desc *rx_desc; + struct sxe_tx_buffer *tx_buffer; + struct sxe_rx_buffer *rx_buffer; + struct sxe_adapter *adapter = netdev_priv(rx_ring->netdev); + + rx_ntc = rx_ring->next_to_clean; + tx_ntc = tx_ring->next_to_clean; + rx_desc = SXE_RX_DESC(rx_ring, rx_ntc); + + while (tx_ntc != tx_ring->next_to_use) { + tx_desc = SXE_TX_DESC(tx_ring, tx_ntc); + + if (!(tx_desc->wb.status & cpu_to_le32(SXE_TX_DESC_STAT_DD))) { + LOG_ERROR_BDF("xmit not completed, next_to_use=%u, count=%u\n", + tx_ring->next_to_use, count); + return count; + } + + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + + dev_kfree_skb_any(tx_buffer->skb); + + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + tx_ntc++; + if (tx_ntc == tx_ring->depth) { + tx_ntc = 0; + } + + count++; + } + + count = 0; + while (rx_desc->wb.upper.length) { + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; + + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->dma, + sxe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + if (sxe_loopback_frame_check(rx_buffer, size)) { + count++; + } else { + break; + } + + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->dma, + sxe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + rx_ntc++; + if (rx_ntc == rx_ring->depth) { + rx_ntc = 0; + } + + rx_desc = SXE_RX_DESC(rx_ring, rx_ntc); + } + LOG_DEBUG_BDF("received pkg num=%u\n", count); + 
netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->idx)); + + sxe_rx_ring_buffers_alloc(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; + tx_ring->next_to_clean = tx_ntc; + + return count; +} + +STATIC int sxe_loopback_test_run(struct sxe_adapter *adapter) +{ + s32 ret = SXE_DIAG_TEST_PASSED; + u32 size = SXE_LOOPBACK_TEST_FRAME_SIZE; + struct sk_buff *skb; + u32 i, j, lc, good_cnt; + netdev_tx_t tx_ret_val; + u32 cap_orig = adapter->cap; + struct sxe_hw *hw = &adapter->hw; + struct sxe_ring *tx_ring = &adapter->test_ctxt.tx_ring; + struct sxe_ring *rx_ring = &adapter->test_ctxt.rx_ring; + struct sxe_mac_stats *hw_stats = &adapter->stats.hw; + + adapter->cap &= ~SXE_DCB_ENABLE; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret = -SXE_DIAG_ALLOC_SKB_ERR; + goto l_end; + } + + sxe_loopback_frame_create(skb, size); + skb_put(skb, size); + + if (rx_ring->depth <= tx_ring->depth) { + lc = ((tx_ring->depth / SXE_LOOPBACK_TEST_DESC_COUNT) * SXE_LOOPBACK_TEST_LOOP) + 1; + } else { + lc = ((rx_ring->depth / SXE_LOOPBACK_TEST_DESC_COUNT) * SXE_LOOPBACK_TEST_LOOP) + 1; + } + + for (j = 0; j <= lc; j++) { + + hw->stat.ops->stats_get(hw, hw_stats); + LOG_DEBUG_BDF("max_loop_num=%u, cnt=%u original gptc:%llu gprc:%llu " + "qptc:%llu dbutxtcin:%llu dbutxtcout:%llu " + "qprc:%llu dburxtcin:%llu dburxtcout:%llu " + "crcerrs:%llu rfc:%llu\n", + lc, j, hw_stats->gptc, + hw_stats->gprc, + hw_stats->qptc[0], + hw_stats->dbutxtcin[0], + hw_stats->dbutxtcout[0], + hw_stats->qprc[0], + hw_stats->dburxtcin[0], + hw_stats->dburxtcout[0], + hw_stats->crcerrs, hw_stats->rfc); + + good_cnt = 0; + for (i = 0; i < 64; i++) { + skb_get(skb); + tx_ret_val = sxe_ring_xmit(skb, adapter->netdev, tx_ring); + if (tx_ret_val == NETDEV_TX_OK) { + good_cnt++; + } + } + + hw->stat.ops->stats_get(hw, hw_stats); + LOG_DEBUG_BDF("=====j:%u tx done==== gptc:%llu gprc:%llu " + "qptc:%llu dbutxtcin:%llu dbutxtcout:%llu" + "qprc:%llu dburxtcin:%llu dburxtcout:%llu" + "crcerrs:%llu rfc:%llu\n", + j, hw_stats->gptc, hw_stats->gprc, + hw_stats->qptc[0], + hw_stats->dbutxtcin[0], + hw_stats->dbutxtcout[0], + hw_stats->qprc[0], + hw_stats->dburxtcin[0], + hw_stats->dburxtcout[0], + hw_stats->crcerrs, hw_stats->rfc); + + if (good_cnt != 64) { + LOG_ERROR_BDF("xmit pkg num=%u, !=64\n", good_cnt); + ret = -SXE_DIAG_LOOPBACK_SEND_TEST_ERR; + break; + } + + msleep(200); + + good_cnt = sxe_test_ring_clean(rx_ring, tx_ring, size); + + hw->stat.ops->stats_get(hw, hw_stats); + LOG_DEBUG_BDF("====j:%u rx done===== gptc:%llu gprc:%llu " + "qptc:%llu dbutxtcin:%llu dbutxtcout:%llu" + "qprc:%llu dburxtcin:%llu dburxtcout:%llu" + "crcerrs:%llu rfc:%llu\n", + j, hw_stats->gptc, hw_stats->gprc, + hw_stats->qptc[0], + hw_stats->dbutxtcin[0], + hw_stats->dbutxtcout[0], + hw_stats->qprc[0], + hw_stats->dburxtcin[0], + hw_stats->dburxtcout[0], + hw_stats->crcerrs, hw_stats->rfc); + + if (good_cnt != 64) { + LOG_ERROR_BDF("recive pkg num=%u, !=64\n", good_cnt); + ret = -SXE_DIAG_LOOPBACK_RECV_TEST_ERR; + break; + } + } + + kfree_skb(skb); + adapter->cap = cap_orig; +l_end: + return ret; +} + +STATIC s32 sxe_loopback_test(struct sxe_adapter *adapter) +{ + s32 ret; + LOG_DEBUG_BDF("loopback test start\n"); + + ret = sxe_test_ring_configure(adapter); + if (ret) { + goto l_end; + } + + LOG_DEBUG_BDF("test_ring_configure end\n"); + + sxe_loopback_test_setup(adapter); + + LOG_DEBUG_BDF("loopback_test_setup end\n"); + + ret = sxe_loopback_test_run(adapter); + + LOG_DEBUG_BDF("sxe_loopback_test_run end\n"); + + sxe_test_ring_free(adapter); 
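+ /* Tear the test rings down unconditionally; ret still carries the result of the loopback run (or the earlier ring-setup error) for the caller. */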
+l_end: + LOG_INFO_BDF("loopback test end, ret = %d\n", ret); + return ret; +} + +static inline void sxe_all_test_result_set(u64 *res, s32 value) +{ + res[SXE_DIAG_REGS_TEST] = value; + res[SXE_DIAG_EEPROM_TEST] = value; + res[SXE_DIAG_IRQ_TEST] = value; + res[SXE_DIAG_LOOPBACK_TEST] = value; + res[SXE_DIAG_LINK_TEST] = value; +} + +static void sxe_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *result) +{ + u32 i; + s32 ret; + struct sxe_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); + struct sxe_hw *hw = &adapter->hw; + + LOG_INFO_BDF("ethtool -t start\n"); + + if (sxe_is_hw_fault(hw)) { + eth_test->flags |= ETH_TEST_FL_FAILED; + sxe_all_test_result_set(result, SXE_DIAG_TEST_BLOCKED); + LOG_MSG_ERR(hw, "nic hw fault - test blocked\n"); + goto l_end; + } + + set_bit(SXE_TESTING, &adapter->state); + + if (eth_test->flags != ETH_TEST_FL_OFFLINE) { + LOG_MSG_INFO(hw, "online testing starting\n"); + + ret = sxe_link_test(adapter); + if (ret) { + eth_test->flags |= ETH_TEST_FL_FAILED; + } + result[SXE_DIAG_LINK_TEST] = -ret; + + result[SXE_DIAG_REGS_TEST] = SXE_DIAG_TEST_PASSED; + result[SXE_DIAG_EEPROM_TEST] = SXE_DIAG_TEST_PASSED; + result[SXE_DIAG_IRQ_TEST] = SXE_DIAG_TEST_PASSED; + result[SXE_DIAG_LOOPBACK_TEST] = SXE_DIAG_TEST_PASSED; + + clear_bit(SXE_TESTING, &adapter->state); + goto skip_ol_tests; + } + + if (adapter->cap & SXE_SRIOV_ENABLE) { + for (i = 0; i < adapter->vt_ctxt.num_vfs; i++) { + if (adapter->vt_ctxt.vf_info[i].is_ready) { + LOG_DEV_WARN("offline diagnostic is not " + "supported when VFs are present\n"); + sxe_all_test_result_set(result, SXE_DIAG_TEST_BLOCKED); + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(SXE_TESTING, &adapter->state); + goto skip_ol_tests; + } + } + } + + LOG_MSG_INFO(hw, "offline testing starting\n"); + + msleep_interruptible(SXE_TEST_SLEEP_TIME * SXE_HZ_TRANSTO_MS); + ret = sxe_link_test(adapter); + if (ret) { + eth_test->flags |= ETH_TEST_FL_FAILED; + } + result[SXE_DIAG_LINK_TEST] = -ret; + + if (if_running) { + sxe_close(netdev); + } else { + sxe_reset(adapter); + } + + LOG_MSG_INFO(hw, "register testing starting\n"); + ret = sxe_reg_test(adapter); + if (ret) { + eth_test->flags |= ETH_TEST_FL_FAILED; + } + result[SXE_DIAG_REGS_TEST] = -ret; + + sxe_reset(adapter); + + result[SXE_DIAG_EEPROM_TEST] = SXE_DIAG_TEST_PASSED; + + LOG_MSG_INFO(hw, "interrupt testing starting\n"); + ret = sxe_irq_test(adapter); + if (ret) { + eth_test->flags |= ETH_TEST_FL_FAILED; + } + result[SXE_DIAG_IRQ_TEST] = -ret; + + sxe_reset(adapter); + + if (adapter->cap & (SXE_SRIOV_ENABLE | SXE_MACVLAN_ENABLE)) { + LOG_MSG_INFO(hw, "skip mac loopback diagnostic in vt mode\n"); + result[SXE_DIAG_LOOPBACK_TEST] = 0; + goto skip_loopback; + } + + LOG_MSG_INFO(hw, "loopback testing starting\n"); + ret = sxe_loopback_test(adapter); + if (ret) { + eth_test->flags |= ETH_TEST_FL_FAILED; + } + result[SXE_DIAG_LOOPBACK_TEST] = -ret; + +skip_loopback: + sxe_reset(adapter); + + clear_bit(SXE_TESTING, &adapter->state); + + if (if_running) { + sxe_open(netdev); + } else if (adapter->phy_ctxt.ops->sfp_tx_laser_disable) { + adapter->phy_ctxt.ops->sfp_tx_laser_disable(adapter); + } + +skip_ol_tests: + msleep_interruptible(SXE_TEST_SLEEP_TIME * SXE_HZ_TRANSTO_MS); +l_end: + LOG_INFO_BDF("ethtool -t end\n"); + return; +} + +static int sxe_regs_len_get(struct net_device *netdev) +{ + return SXE_ETHTOOL_DUMP_REGS_LEN; +} + +static void sxe_regs_get(struct net_device *netdev, + struct ethtool_regs *regs, void *data) +{ + u32 
i; + u64 *p; + u8 dump_regs_num; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + + memset(data, 0, SXE_ETHTOOL_DUMP_REGS_LEN); + + regs->version = 0; + + stats_lock(adapter); + dump_regs_num = hw->stat.ops->mac_stats_dump(hw, data, + SXE_MAC_REGS_VAL_LEN); + + LOG_DEBUG_BDF("mac stats:\n"); + p = (u64 *)(((u8 *)data) + SXE_MAC_REGS_VAL_LEN); + for (i = 0; i < SXE_MAC_STATS_REGS_NUM; i++) { + p[i] = *(u64 *)(((s8 *)&adapter->stats.hw) + + mac_stats[i].stat_offset); + LOG_DEBUG_BDF("%s:%llu\n", mac_stats[i].stat_string, p[i]); + } + + dump_regs_num += SXE_MAC_STATS_REGS_NUM; + + if (dump_regs_num != SXE_ETHTOOL_DUMP_REGS_NUM) { + LOG_WARN_BDF("dump_regs_num=%u, regs_num_max=%u\n", + dump_regs_num, (u32)SXE_ETHTOOL_DUMP_REGS_NUM); + } + stats_unlock(adapter); + + return; +} + +static s32 sxe_identify_led_ctrl(struct sxe_adapter *adapter, bool is_blink) +{ + s32 ret; + s32 resp; + struct sxe_led_ctrl ctrl; + struct sxe_driver_cmd cmd; + struct sxe_hw *hw = &adapter->hw; + + ctrl.mode = (true == is_blink) ? SXE_IDENTIFY_LED_BLINK_ON : \ + SXE_IDENTIFY_LED_BLINK_OFF; + ctrl.duration = 0; + + cmd.req = &ctrl; + cmd.req_len = sizeof(ctrl); + cmd.resp = &resp; + cmd.resp_len = sizeof(resp); + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_LED_CTRL; + cmd.is_interruptible = false; + ret = sxe_driver_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:led ctrl\n", ret); + ret = -EIO; + } + + return ret; +} + +int sxe_phys_id_set(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + int ret = 0; + struct sxe_adapter *adapter = netdev_priv(netdev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + ret = sxe_identify_led_ctrl(adapter, true); + if (ret) { + LOG_ERROR_BDF("led active failed, ret=%d\n", ret); + } + break; + + case ETHTOOL_ID_INACTIVE: + ret = sxe_identify_led_ctrl(adapter, false); + if (ret) { + LOG_ERROR_BDF("led inactive failed, ret=%d\n", ret); + } + break; + default: + LOG_ERROR_BDF("identify led dont support ON/OFF, state=%d\n", state); + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int sxe_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *info) +{ + s32 ret; + bool page_swap = false; + u8 sff8472_rev, addr_mode; + struct sxe_adapter *adapter = netdev_priv(netdev); + + ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_8472_COMPLIANCE, + sizeof(sff8472_rev), &sff8472_rev); + if (ret) { + ret = -EIO; + goto l_end; + } + + ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_8472_DIAG_MONITOR_TYPE, + sizeof(addr_mode), &addr_mode); + if (ret) { + ret = -EIO; + goto l_end; + } + + if (addr_mode & SXE_SFF_ADDRESSING_MODE) { + LOG_MSG_ERR(drv, "address change required to access page 0xA2, " + "but not supported. 
please report the module " + "type to the driver maintainers.\n"); + page_swap = true; + } + + if ((sff8472_rev == SXE_SFF_8472_UNSUP) || page_swap || \ + !(addr_mode & SXE_SFF_DDM_IMPLEMENTED)) { + info->type = ETH_MODULE_SFF_8079; + info->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + info->type = ETH_MODULE_SFF_8472; + info->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + + LOG_INFO("sfp support management is %x, eeprom addr mode=%x " + "eeprom type=%x, eeprom len=%d\n", + sff8472_rev, addr_mode, info->type, info->eeprom_len); + +l_end: + return ret; +} + +static int sxe_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eep, u8 *data) +{ + s32 ret; + struct sxe_adapter *adapter = netdev_priv(netdev); + + if (eep->len == 0) { + ret = -EINVAL; + goto l_end; + } + + if (test_bit(SXE_IN_SFP_INIT, &adapter->state)) { + ret = -EBUSY; + goto l_end; + } + + ret = sxe_sfp_eeprom_read(adapter, eep->offset, eep->len, data); + if (ret) { + LOG_ERROR("read sfp failed\n"); + } + +l_end: + return ret; +} + +static int sxe_get_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + return sxe_irq_coalesce_get(netdev, ec); +} + +static int sxe_set_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + return sxe_irq_coalesce_set(netdev, ec); +} + +static const struct ethtool_ops sxe_ethtool_ops = { +#ifdef ETHTOOL_COALESCE_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, +#endif + .get_drvinfo = sxe_get_drvinfo, + .nway_reset = sxe_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = sxe_get_ringparam, + .set_ringparam = sxe_set_ringparam, + .get_channels = sxe_get_channels, + .set_channels = sxe_set_channels, + .get_strings = sxe_get_strings, + .get_sset_count = sxe_get_sset_count, + .get_ethtool_stats = sxe_get_ethtool_stats, + .get_rxnfc = sxe_get_rxnfc, + .set_rxnfc = sxe_set_rxnfc, + .get_rxfh_indir_size = sxe_rss_indir_size, + .get_rxfh_key_size = sxe_get_rxfh_key_size, + .get_rxfh = sxe_get_rxfh, + .set_rxfh = sxe_set_rxfh, + .get_priv_flags = sxe_get_priv_flags, + .set_priv_flags = sxe_set_priv_flags, + .get_ts_info = sxe_get_ts_info, + .set_phys_id = sxe_phys_id_set, + .set_link_ksettings = sxe_set_link_ksettings_proto, + .get_link_ksettings = sxe_get_link_ksettings_proto, + .self_test = sxe_diag_test, + .get_pauseparam = sxe_get_pauseparam, + .set_pauseparam = sxe_set_pauseparam, + .set_coalesce = sxe_set_coalesce, + .get_coalesce = sxe_get_coalesce, + .get_wol = sxe_get_wol, + .set_wol = sxe_set_wol, + .get_msglevel = sxe_get_msglevel, + .set_msglevel = sxe_set_msglevel, + .get_regs_len = sxe_regs_len_get, + .get_regs = sxe_regs_get, + .get_module_info = sxe_get_module_info, + .get_module_eeprom = sxe_get_module_eeprom, +}; + +void sxe_ethtool_ops_set(struct net_device *netdev) +{ + netdev->ethtool_ops = &sxe_ethtool_ops; + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ethtool.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ethtool.h new file mode 100644 index 000000000000..3d3c4e5bd328 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ethtool.h @@ -0,0 +1,106 @@ +#ifndef __SXE_ETHTOOL_H__ +#define __SXE_ETHTOOL_H__ + +#include +#include "sxe.h" + +#define 
SXE_FNAV_RULES_TABLE_SIZE_UNIT (1024) +#define UDP_RSS_FLAGS (SXE_RSS_FIELD_IPV4_UDP | \ + SXE_RSS_FIELD_IPV6_UDP) + +#define SXE_SFP_INIT_WAIT_ITR_MIN (1000) +#define SXE_SFP_INIT_WAIT_ITR_MAX (2000) + +#define SXE_TEST_GSTRING_ARRAY_SIZE sxe_self_test_suite_num_get() + +#define SXE_RX_RING_NUM netdev->num_tx_queues +#define SXE_STATS_ARRAY_SIZE sxe_stats_num_get() +#define SXE_RING_STATS_LEN ( \ + (netdev->num_tx_queues + SXE_RX_RING_NUM) * \ + (sizeof(struct sxe_ring_stats) / sizeof(u64))) +#define SXE_DBU_PKT_BUF_STATS_LEN ( \ + (sizeof(((struct sxe_adapter *)0)->stats.hw.dburxtcin) + \ + sizeof(((struct sxe_adapter *)0)->stats.hw.dburxtcout) + \ + sizeof(((struct sxe_adapter *)0)->stats.hw.dburxgdreecnt) + \ + sizeof(((struct sxe_adapter *)0)->stats.hw.dburxdrofpcnt) + \ + sizeof(((struct sxe_adapter *)0)->stats.hw.dbutxtcin) + \ + sizeof(((struct sxe_adapter *)0)->stats.hw.dbutxtcout)) \ + / sizeof(u64)) + +#define SXE_DMA_QUEUE_STATS_NUM 5 +#define SXE_DMA_QUEUE_STATS_LEN ( SXE_DCB_8_TC * SXE_DMA_QUEUE_STATS_NUM ) + +#define SXE_DMA_STATS_LEN (SXE_DMA_QUEUE_STATS_LEN + sxe_dma_stats_num_get()) + +#define SXE_FC_STATS_LEN ( \ + (sizeof(((struct sxe_adapter *)0)->stats.hw.prcpf) + \ + sizeof(((struct sxe_adapter *)0)->stats.hw.pfct)) \ + / sizeof(u64) ) + +#define SXE_STATS_LEN (SXE_STATS_ARRAY_SIZE + \ + SXE_RING_STATS_LEN + \ + SXE_DBU_PKT_BUF_STATS_LEN + \ + SXE_DMA_STATS_LEN + \ + SXE_FC_STATS_LEN) + +#define SXE_PRIV_FLAGS_LEGACY_RX BIT(0) +#ifdef SXE_IPSEC_CONFIGURE +#define SXE_PRIV_FLAGS_VF_IPSEC_EN BIT(1) +#endif +#define SXE_PRIV_FLAGS_STR_LEN sxe_priv_flags_num_get() + +#define SXE_ETHTOOL_DUMP_REGS_NUM (SXE_MAC_REGS_NUM + \ + SXE_MAC_STATS_REGS_NUM) +#define SXE_ETHTOOL_DUMP_REGS_LEN (SXE_MAC_REGS_VAL_LEN + \ + SXE_MAC_STATS_REGS_VAL_LEN) +#define SXE_MAC_REGS_NUM sxe_mac_reg_num_get() +#define SXE_MAC_REGS_VAL_LEN (SXE_MAC_REGS_NUM * sizeof(u32)) +#define SXE_MAC_STATS_REGS_NUM sxe_mac_stats_regs_num_get() +#define SXE_MAC_STATS_REGS_VAL_LEN (sizeof(u64) * SXE_MAC_STATS_REGS_NUM) + +#define SXE_LOOPBACK_TEST_DESC_COUNT 64 +#define SXE_LOOPBACK_TEST_LOOP 2 +#define SXE_LOOPBACK_TEST_FRAME_SIZE 1024 + +#define SXE_LPBK_TX_DISB_WAIT_MIN (10000) +#define SXE_LPBK_TX_DISB_WAIT_MAX (20000) + +#define SXE_NIC_RESET_WAIT_MIN (1000) +#define SXE_NIC_RESET_WAIT_MAX (2000) + +enum { + NETDEV_STATS, + SXE_STATS +}; + +struct sxe_ethtool_stats { + s8 stat_string[ETH_GSTRING_LEN]; + u32 type; + u32 sizeof_stat; + u32 stat_offset; +}; + +struct sxe_mac_stats_info { + s8 stat_string[ETH_GSTRING_LEN]; + u32 stat_offset; +}; + +u32 sxe_dma_stats_num_get(void); + +u32 sxe_mac_stats_regs_num_get(void); + +u32 sxe_self_test_suite_num_get(void); + +u32 sxe_stats_num_get(void); + +u32 sxe_priv_flags_num_get(void); + +void sxe_ethtool_ops_set(struct net_device *netdev); + +s32 sxe_fnav_dest_queue_parse(struct sxe_adapter *adapter, + u64 ring_cookie, + u8 *queue); + +s32 sxe_fw_wol_set(struct sxe_adapter *adapter, u32 enable); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_filter.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_filter.c new file mode 100644 index 000000000000..3381f382fb54 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_filter.c @@ -0,0 +1,940 @@ + +#include +#include +#include + +#include "sxe_filter.h" +#include "sxe.h" +#include "sxe_hw.h" +#include "sxe_regs.h" +#include "sxe_tx_proc.h" +#include "sxe_ethtool.h" + +extern struct workqueue_struct *sxe_fnav_workqueue; +extern struct kmem_cache *fnav_cache; + +#define SXE_FNAV_BKT_HASH_MASK 
0x1FFF +#define SXE_FNAV_HASH_REG_MASK 0xFFFFFFFF + +#define SXE_SAMPLE_WORD_BITS (16) + +s32 sxe_uc_addr_add(struct sxe_hw *hw, struct sxe_uc_addr_table *uc_table, + const u8 *addr, u16 pool) +{ + struct sxe_adapter *adapter = hw->adapter; + struct sxe_uc_addr_table *entry; + s32 ret; + u32 i; + + if (is_zero_ether_addr(addr)) { + ret = -EINVAL; + LOG_ERROR_BDF("mac addr is zero.(err:%d)\n", ret); + goto l_out; + } + + for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) { + entry = &uc_table[i]; + + if (!test_and_set_bit(SXE_UC_ADDR_ENTRY_USED, &entry->state)) { + ether_addr_copy(entry->addr, addr); + entry->pool = pool; + + hw->filter.mac.ops->uc_addr_add(hw, + i, entry->addr, entry->pool); + ret = i; + LOG_INFO("mac addr:%pM pool:%u add to " + "uc_table[%u] success.\n", + addr, pool, i); + goto l_out; + } + } + + ret = -ENOMEM; + LOG_ERROR_BDF("index:%u mac addr:%pM pool:%u add to uc filter fail.\n", + i, addr, pool); + +l_out: + return ret; +} + +s32 sxe_uc_addr_del(struct sxe_hw *hw, struct sxe_uc_addr_table *uc_table, + const u8 *addr, u16 pool) +{ + struct sxe_adapter *adapter = hw->adapter; + struct sxe_uc_addr_table *entry; + s32 ret = 0; + u32 i; + + if (is_zero_ether_addr(addr)) { + ret = -EINVAL; + LOG_WARN_BDF("mac addr is zero.(err:%d)\n", ret); + goto l_out; + } + + for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) { + entry = &uc_table[i]; + if ((entry->pool == pool) && + (ether_addr_equal(addr, entry->addr))) { + if ((test_and_clear_bit(SXE_UC_ADDR_ENTRY_USED, + &entry->state))) { + hw->filter.mac.ops->uc_addr_del(hw, i); + LOG_INFO("pool:%u mac addr:%pM uc_filter_addr[%u] " + "entry del success.\n", + pool, addr, i); + goto l_out; + } + } + } + + ret = -ENOMEM; + + LOG_ERROR_BDF("index:%u mac addr:%pM pool:%u delete fail due to " + "not exsit in uc filter.\n", + i, addr, pool); + +l_out: + return ret; +} + +s32 sxe_uc_sync(struct net_device *netdev, const u8 *addr) +{ + s32 ret; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + struct sxe_uc_addr_table *uc_table = + adapter->mac_filter_ctxt.uc_addr_table; + + ret = sxe_uc_addr_add(hw, uc_table, addr, PF_POOL_INDEX(0)); + + return min_t(s32, ret, 0); +} + +s32 sxe_uc_unsync(struct net_device *netdev, const u8 *addr) +{ + s32 ret; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + + ret = sxe_uc_addr_del(hw, adapter->mac_filter_ctxt.uc_addr_table, + addr, PF_POOL_INDEX(0)); + if (ret) { + LOG_ERROR_BDF("pool idx:%d addr:%pM del fail.\n", PF_POOL_INDEX(0), addr); + } + + return 0; +} + +static void sxe_mc_hash_table_add(struct sxe_adapter *adapter, u8 *mc_addr) +{ + u16 extracted; + u16 bit_index; + u16 reg_index; + + adapter->mac_filter_ctxt.mc_hash_table_used++; + + extracted = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + + extracted &= SXE_MC_ADDR_EXTRACT_MASK; + LOG_DEV_DEBUG(" bit-vector = 0x%03X\n", extracted); + + reg_index = (extracted >> SXE_MC_ADDR_SHIFT) & SXE_MC_ADDR_REG_MASK; + + bit_index = extracted & SXE_MC_ADDR_BIT_MASK; + + adapter->mac_filter_ctxt.mc_hash_table[reg_index] |= BIT(bit_index); + + LOG_INFO("mc_addr:%pM extracted:0x%x reg_index:%u bit_index:%u " + "add to mc_hash_table success.\n", + mc_addr, extracted, reg_index, bit_index); + + return; +} + +#ifdef CONFIG_PCI_IOV +void sxe_vf_mc_addr_restore(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + struct sxe_virtual_context *vt_ctxt = &adapter->vt_ctxt; + u8 i; + u8 j; + u8 reg_idx; + u8 bit_idx; + u32 filter_ctl; + + for (i = 0; i < vt_ctxt->num_vfs; i++) { + 
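+ /* Replay each VF's saved multicast hashes into the MTA, then re-enable ROMPE only for pools that still have hash entries. */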
struct sxe_vf_info *vf_info = &vt_ctxt->vf_info[i]; + for (j = 0; j < vf_info->mc_hash_used; j++) { + reg_idx = (vf_info->mc_hash[j] >> SXE_MC_ADDR_SHIFT) & + SXE_MC_ADDR_REG_MASK; + bit_idx = vf_info->mc_hash[j] & SXE_MC_ADDR_BIT_MASK; + hw->filter.mac.ops->mta_hash_table_update(hw, reg_idx, bit_idx); + LOG_INFO_BDF("vf_idx:%u mc_cnt:%u mc_hash[%d]:0x%x" + "reg_idx=%u, bit_idx=%u.\n", + i, vf_info->mc_hash_used, j, vf_info->mc_hash[j], + reg_idx, bit_idx); + } + + filter_ctl = hw->filter.mac.ops->pool_rx_mode_get(hw, i); + if (vf_info->mc_hash_used) { + filter_ctl |= SXE_VMOLR_ROMPE; + + } else { + filter_ctl &= ~SXE_VMOLR_ROMPE; + } + hw->filter.mac.ops->pool_rx_mode_set(hw, filter_ctl, i); + } + + return; +} +#else +void sxe_vf_mc_addr_restore(struct sxe_adapter *adapter) +{ + return; +} +#endif + +s32 sxe_mc_addr_add(struct net_device *netdev) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + struct netdev_hw_addr *hw_addr; + struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt; + u8 i; + + if (!netif_running(netdev)) { + return 0; + } + + LOG_DEV_DEBUG("clearing MTA.\n"); + mac_filter->mc_hash_table_used = 0; + memset(mac_filter->mc_hash_table, 0, + sizeof(mac_filter->mc_hash_table)); + + netdev_for_each_mc_addr(hw_addr, netdev) { + LOG_DEV_DEBUG("adding the multicast addresses:\n"); + sxe_mc_hash_table_add(adapter, hw_addr->addr); + } + + for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++) { + hw->filter.mac.ops->mta_hash_table_set(hw, i, mac_filter->mc_hash_table[i]); + } + + if (mac_filter->mc_hash_table_used) { + hw->filter.mac.ops->mc_filter_enable(hw); + } + + sxe_vf_mc_addr_restore(adapter); + + LOG_DEV_DEBUG("sxe_mc_addr_add complete.\n"); + + return netdev_mc_count(netdev); +} + +void sxe_fc_mac_addr_set(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + u8 mac_addr[ETH_ALEN]; + + memcpy(mac_addr, adapter->mac_filter_ctxt.cur_mac_addr, ETH_ALEN); + hw->filter.mac.ops->fc_mac_addr_set(hw, mac_addr); + + return; +} + +void sxe_mac_addr_set(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + struct sxe_uc_addr_table *entry = + &adapter->mac_filter_ctxt.uc_addr_table[SXE_DEFAULT_UC_ADDR_IDX]; + + memcpy(&entry->addr, adapter->mac_filter_ctxt.cur_mac_addr, ETH_ALEN); + entry->pool = PF_POOL_INDEX(0); + + set_bit(SXE_UC_ADDR_ENTRY_USED, &entry->state); + + hw->filter.mac.ops->uc_addr_add(hw, SXE_DEFAULT_UC_ADDR_IDX, + entry->addr, entry->pool); + + sxe_fc_mac_addr_set(adapter); + + return; +} + +static s32 sxe_uc_filter_init(struct sxe_adapter *adapter) +{ + s32 ret = 0; + struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt; + + mac_filter->uc_addr_table = kcalloc(SXE_UC_ENTRY_NUM_MAX, + sizeof(struct sxe_uc_addr_table), GFP_KERNEL); + if (!mac_filter->uc_addr_table) { + ret = -ENOMEM; + LOG_ERROR_BDF("rar entry:%d size:%lu mac table kcalloc fail.(err:%d)", + SXE_UC_ENTRY_NUM_MAX, + sizeof(struct sxe_uc_addr_table), ret); + } + + return ret; +} + +s32 sxe_mac_filter_init(struct sxe_adapter *adapter) +{ + s32 ret; + + ret = sxe_uc_filter_init(adapter); + if (ret) { + LOG_ERROR_BDF("uc filter init failed\n"); + goto l_ret; + } + +l_ret: + return ret; +} + +void sxe_mac_filter_reset(struct sxe_adapter *adapter) +{ + struct sxe_uc_addr_table *entry; + u32 i; + + for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) { + entry = &adapter->mac_filter_ctxt.uc_addr_table[i]; + + clear_bit(SXE_UC_ADDR_ENTRY_USED, &entry->state); + } + + adapter->mac_filter_ctxt.mc_hash_table_used = 0; + return; 
+} + +void sxe_mac_filter_destroy(struct sxe_adapter *adapter) +{ + if (adapter->mac_filter_ctxt.uc_addr_table) { + kfree(adapter->mac_filter_ctxt.uc_addr_table); + } + + return; +} + +void sxe_fnav_rules_restore(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + struct hlist_node *node; + struct sxe_fnav_rule_node *rule; + u64 ring_cookie; + u8 queue; + + spin_lock(&adapter->fnav_ctxt.specific_lock); + + if (!hlist_empty(&adapter->fnav_ctxt.rules_list)) { + hw->dbu.ops->fnav_specific_rule_mask_set(hw, + &adapter->fnav_ctxt.rules_mask); + + hlist_for_each_entry_safe(rule, node, + &adapter->fnav_ctxt.rules_list, node) { + ring_cookie = rule->ring_cookie; + + sxe_fnav_dest_queue_parse(adapter, + ring_cookie, &queue); + + hw->dbu.ops->fnav_specific_rule_add(hw, + &rule->rule_info, + rule->sw_idx, + queue); + } + } + + spin_unlock(&adapter->fnav_ctxt.specific_lock); + + return; +} + +void sxe_fnav_rules_clean(struct sxe_adapter *adapter) +{ + struct hlist_node *container_node; + struct sxe_fnav_rule_node *rule; + + spin_lock(&adapter->fnav_ctxt.specific_lock); + + hlist_for_each_entry_safe(rule, container_node, + &adapter->fnav_ctxt.rules_list, + node) { + hlist_del(&rule->node); + kfree(rule); + } + adapter->fnav_ctxt.rule_cnt = 0; + + spin_unlock(&adapter->fnav_ctxt.specific_lock); + + return ; +} + +static bool sxe_fnav_is_sample_protocol_supported(__be16 protocol) +{ + return !((protocol != htons(ETH_P_IP)) && + (protocol != htons(ETH_P_IPV6))); +} + +static s32 sxe_fnav_sample_header_len_check(struct sk_buff *skb, + union sxe_sample_data_hdr *hdr) +{ + s32 ret = 0; + if (unlikely(hdr->network <= skb->data)) { + ret = -SXE_ERR_PARAM; + LOG_DEBUG("hdr.network <= skb->data\n"); + goto l_end; + } + + if (unlikely(skb_tail_pointer(skb) < hdr->network + 40)) { + ret = -SXE_ERR_PARAM; + LOG_DEBUG("skb_tail_pointer(skb) < hdr->network + 40\n"); + goto l_end; + } + +l_end: + return ret; +} + +static s32 sxe_fnav_sample_tcp_ip_header_check( + union sxe_sample_data_hdr *hdr, + struct sk_buff *skb, + unsigned int *hlen) +{ + int l4_proto; + s32 ret = 0; + + switch (hdr->ipv4->version) { + case SXE_IPV4: + *hlen = (hdr->network[0] & 0x0F) << 2; + l4_proto = hdr->ipv4->protocol; + break; + case SXE_IPV6: + *hlen = hdr->network - skb->data; + l4_proto = ipv6_find_hdr(skb, hlen, IPPROTO_TCP, NULL, NULL); + *hlen -= hdr->network - skb->data; + break; + default: + ret = -SXE_ERR_PARAM; + LOG_ERROR("unsupported l3 protocol:%d\n", hdr->ipv4->version); + goto l_end;; + } + + if (l4_proto != IPPROTO_TCP) { + ret = -SXE_ERR_PARAM; + LOG_INFO("unsupported l4 protocol:%d\n", l4_proto); + goto l_end; + } + + if (unlikely(skb_tail_pointer(skb) < hdr->network + + *hlen + sizeof(struct tcphdr))) { + ret = -SXE_ERR_PARAM; + LOG_ERROR("error on length skb_tail_pointer=0x%p < " + "(hdr->network + *hlen + sizeof(struct tcphdr))=0x%p\n", + skb_tail_pointer(skb), + (hdr->network + *hlen + sizeof(struct tcphdr))); + goto l_end; + } + +l_end: + return ret; +} + +static void sxe_sample_hash_iter_compute(u8 bit_n, + u32 *common_hash, u32 *bucket_hash, + u32 *sig_hash, u32 lo_hash_dword, + u32 hi_hash_dword) +{ + u32 n = bit_n; + + if (SXE_SAMPLE_COMMON_HASH_KEY & BIT(n)) { + *common_hash ^= lo_hash_dword >> n; + } else if (SXE_FNAV_BUCKET_HASH_KEY & BIT(n)) { + *bucket_hash ^= lo_hash_dword >> n; + } else if (SXE_FNAV_SAMPLE_HASH_KEY & BIT(n)) { + *sig_hash ^= lo_hash_dword << (SXE_SAMPLE_WORD_BITS - n); + } + + if (SXE_SAMPLE_COMMON_HASH_KEY & BIT(n + SXE_SAMPLE_WORD_BITS)) { + *common_hash ^= hi_hash_dword 
>> n; + } else if (SXE_FNAV_BUCKET_HASH_KEY & BIT(n + SXE_SAMPLE_WORD_BITS)) { + *bucket_hash ^= hi_hash_dword >> n; + } else if (SXE_FNAV_SAMPLE_HASH_KEY & BIT(n + SXE_SAMPLE_WORD_BITS)) { + *sig_hash ^= hi_hash_dword << (SXE_SAMPLE_WORD_BITS - n); + } + + return; +} + +static u32 sxe_sample_hash_compute(union sxe_sample_hash_dword input, + union sxe_sample_hash_dword common) +{ + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; + u8 i; + + flow_vm_vlan = ntohl(input.dword); + + hi_hash_dword = ntohl(common.dword); + + lo_hash_dword = (hi_hash_dword >> SXE_SAMPLE_WORD_BITS) | (hi_hash_dword << SXE_SAMPLE_WORD_BITS); + + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> SXE_SAMPLE_WORD_BITS); + + sxe_sample_hash_iter_compute(0, &common_hash, &bucket_hash, &sig_hash, + lo_hash_dword, hi_hash_dword); + + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << SXE_SAMPLE_WORD_BITS); + + for (i = 1; i < SXE_SAMPLE_WORD_BITS; i++) { + sxe_sample_hash_iter_compute(i, &common_hash, + &bucket_hash, &sig_hash, + lo_hash_dword, hi_hash_dword); + + } + + bucket_hash ^= common_hash; + bucket_hash &= SXE_SAMPLE_HASH_MASK; + + sig_hash ^= common_hash << SXE_SAMPLE_WORD_BITS; + sig_hash &= SXE_SAMPLE_HASH_MASK << SXE_SAMPLE_WORD_BITS; + + return sig_hash ^ bucket_hash; +} + +static void sxe_fnav_sample_rule_add(struct sxe_adapter *adapter, + u64 hash_cmd) +{ + struct sxe_hw * hw = &adapter->hw; + struct sxe_fnav_sample_filter *input, *filter; + u32 key; + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) { + LOG_ERROR_BDF("fnav sample rule add failed, no memory\n"); + hw->dbu.ops->fnav_single_sample_rule_del(hw, + (u32)(hash_cmd & SXE_FNAV_HASH_REG_MASK)); + goto l_end; + } + + key = (hash_cmd & SXE_FNAV_BKT_HASH_MASK); + input->hash = (u32)(hash_cmd & SXE_FNAV_HASH_REG_MASK); + spin_lock(&adapter->fnav_ctxt.sample_lock); + hash_for_each_possible(adapter->fnav_ctxt.sample_list, filter, hlist, key) { + if (filter->hash == input->hash) { + kfree(input); + goto l_unlock; + } + } + + + hash_add(adapter->fnav_ctxt.sample_list, &input->hlist, key); + adapter->fnav_ctxt.sample_rules_cnt++; + +l_unlock: + spin_unlock(&adapter->fnav_ctxt.sample_lock); +l_end: + return; +} + +static void sxe_fnav_sample_rule_add_task(struct work_struct *work) +{ + struct sxe_fnav_sample_work_info *sample_work = container_of(work, + struct sxe_fnav_sample_work_info, + work_st); + if (!sample_work) { + goto l_end; + } + + sxe_fnav_sample_rule_add(sample_work->adapter, sample_work->hash); + kmem_cache_free(fnav_cache, sample_work); + +l_end: + return; +} + +s32 sxe_fnav_sample_rule_get(struct sxe_ring *ring, + struct sxe_tx_buffer *tx_buffer) +{ + struct sxe_irq_data *irq_data = ring->irq_data; + struct sxe_hw *hw = &irq_data->adapter->hw; + struct sxe_adapter *adapter = hw->adapter; + union sxe_sample_hash_dword input = { .dword = 0 }; + union sxe_sample_hash_dword common = { .dword = 0 }; + union sxe_sample_data_hdr hdr; + struct tcphdr *th; + unsigned int hlen; + struct sk_buff *skb; + __be16 vlan_id; + bool is_supported; + s32 ret; + u32 hash_value; + u64 hash_cmd; + struct sxe_fnav_sample_work_info *add_work = NULL; + + LOG_DEBUG_BDF("in sample mode, sample_rate=%u, fnav_sample_count=%u\n", + ring->fnav_sample_rate, ring->fnav_sample_count); + if (!irq_data || !ring->fnav_sample_rate) { + goto l_end; + } + + ring->fnav_sample_count++; + + is_supported = sxe_fnav_is_sample_protocol_supported(tx_buffer->protocol); + if (!is_supported) { + LOG_DEBUG_BDF("sample protocol=[%d] 
unsupported\n",tx_buffer->protocol); + goto l_end; + } + + skb = tx_buffer->skb; + hdr.network = skb_network_header(skb); + + ret = sxe_fnav_sample_header_len_check(skb, &hdr); + if (ret) { + LOG_ERROR_BDF("sample header len check failed. ret=%d\n", ret); + goto l_end; + } + + ret = sxe_fnav_sample_tcp_ip_header_check(&hdr, skb, &hlen); + if (ret) { + LOG_INFO("sample tcp ip process err. ret=%d\n", ret); + goto l_end; + } + + th = (struct tcphdr *)(hdr.network + hlen); + + LOG_DEBUG_BDF("tcp is fin ? :%s, is syn ? :%s\n" , + th->fin ? "yes" : "no", th->syn ? "yes" : "no"); + + if ((th->fin) || + (!th->syn && (ring->fnav_sample_count < ring->fnav_sample_rate))) { + goto l_end; + } + + ring->fnav_sample_count = 0; + + vlan_id = htons(tx_buffer->tx_features >> SXE_TX_FEATURE_VLAN_SHIFT); + + input.formatted.vlan_id = vlan_id; + + if (tx_buffer->tx_features & (SXE_TX_FEATURE_SW_VLAN | SXE_TX_FEATURE_HW_VLAN)) { + common.port.src ^= th->dest ^ htons(ETH_P_8021Q); + } else { + common.port.src ^= th->dest ^ tx_buffer->protocol; + } + + common.port.dst ^= th->source; + + switch (hdr.ipv4->version) { + case SXE_IPV4: + input.formatted.flow_type = SXE_SAMPLE_FLOW_TYPE_TCPV4; + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; + break; + case SXE_IPV6: + input.formatted.flow_type = SXE_SAMPLE_FLOW_TYPE_TCPV6; + common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ + hdr.ipv6->saddr.s6_addr32[1] ^ + hdr.ipv6->saddr.s6_addr32[2] ^ + hdr.ipv6->saddr.s6_addr32[3] ^ + hdr.ipv6->daddr.s6_addr32[0] ^ + hdr.ipv6->daddr.s6_addr32[1] ^ + hdr.ipv6->daddr.s6_addr32[2] ^ + hdr.ipv6->daddr.s6_addr32[3]; + break; + default: + break; + } + + LOG_DEBUG_BDF("fnav sample success, start write hw\n"); + + hash_value = sxe_sample_hash_compute(input, common); + hw->dbu.ops->fnav_sample_hash_cmd_get(hw, input.formatted.flow_type, + hash_value, ring->idx, &hash_cmd); + + if (!adapter->fnav_ctxt.is_sample_table_overflowed && + !workqueue_congested(WORK_CPU_UNBOUND, sxe_fnav_workqueue)) { + add_work = kmem_cache_zalloc(fnav_cache, GFP_ATOMIC); + if (!add_work) { + return -ENOMEM; + } + + INIT_WORK(&add_work->work_st, sxe_fnav_sample_rule_add_task); + add_work->adapter = adapter; + add_work->hash = hash_cmd; + queue_work(sxe_fnav_workqueue, &add_work->work_st); + hw->dbu.ops->fnav_sample_hash_set(hw, hash_cmd); + } + +l_end: + return 0; +} + +static void sxe_fnav_sw_specific_rule_add(struct sxe_adapter *adapter, + struct sxe_fnav_rule_node *add_rule) +{ + struct sxe_hw *hw = &adapter->hw; + struct hlist_node *next; + struct sxe_fnav_rule_node *rule = NULL; + struct sxe_fnav_rule_node *pre_node = NULL; + u16 sw_idx = add_rule->sw_idx; + s32 ret; + + hlist_for_each_entry_safe(rule, next, + &adapter->fnav_ctxt.rules_list, + node) { + if (rule->sw_idx >= sw_idx) { + break; + } + pre_node = rule; + } + LOG_DEBUG_BDF("add specific fnav rule in sw_idx[%u]\n",sw_idx); + + if (rule && (rule->sw_idx == sw_idx)) { + LOG_DEBUG_BDF("rule->sw_idx == sw_idx == %u, show bkt_hash." 
+ "old bkt_hash[0x%x], input new bkt_hash[0x%x]\n", + sw_idx, rule->rule_info.ntuple.bkt_hash, + add_rule->rule_info.ntuple.bkt_hash); + if (rule->rule_info.ntuple.bkt_hash != + add_rule->rule_info.ntuple.bkt_hash) { + + ret = hw->dbu.ops->fnav_specific_rule_del(hw, + &rule->rule_info, + sw_idx); + if (ret) { + LOG_ERROR_BDF("delete fnav rule in sw_idx[%d]" + "failed\n", sw_idx); + } + } + + hlist_del(&rule->node); + kfree(rule); + adapter->fnav_ctxt.rule_cnt--; + } + + INIT_HLIST_NODE(&add_rule->node); + + if (pre_node) { + hlist_add_behind(&add_rule->node, &pre_node->node); + } else { + hlist_add_head(&add_rule->node, + &adapter->fnav_ctxt.rules_list); + } + + adapter->fnav_ctxt.rule_cnt++; + + return; +} + +static void sxe_fnav_specific_hash_iter_compute(u8 bit_n, + u32 *bucket_hash, u32 lo_hash_dword, + u32 hi_hash_dword) +{ + u32 n = bit_n; + if (SXE_FNAV_BUCKET_HASH_KEY & BIT(n)) { + *bucket_hash ^= lo_hash_dword >> n; + } + + if (SXE_FNAV_BUCKET_HASH_KEY & BIT(n + 16)) { + *bucket_hash ^= hi_hash_dword >> n; + } + + return; +} + +static void sxe_fnav_specific_hash_compute( + union sxe_fnav_rule_info *input_rule, + union sxe_fnav_rule_info *input_mask) +{ + + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 bucket_hash = 0; + __be32 hi_dword = 0; + u8 i; + + for (i = 0; i <= 10; i++) { + input_rule->fast_access[i] &= input_mask->fast_access[i]; + } + + flow_vm_vlan = ntohl(input_rule->fast_access[0]); + + for (i = 1; i <= 10; i++) { + hi_dword ^= input_rule->fast_access[i]; + } + + hi_hash_dword = ntohl(hi_dword); + + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + sxe_fnav_specific_hash_iter_compute(0, &bucket_hash, + lo_hash_dword, hi_hash_dword); + + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + for (i = 1; i <= 15; i++) { + sxe_fnav_specific_hash_iter_compute(i, &bucket_hash, + lo_hash_dword, hi_hash_dword); + } + + input_rule->ntuple.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF); + LOG_DEBUG("fnav bkt_hash=0x%x\n", input_rule->ntuple.bkt_hash); + + return ; +} + +s32 sxe_fnav_specific_rule_add_process( + struct sxe_adapter *adapter, + struct sxe_fnav_rule_node *input_rule, + union sxe_fnav_rule_info *mask, + u8 queue) +{ + s32 ret = 0; + struct sxe_hw *hw = &adapter->hw; + + spin_lock(&adapter->fnav_ctxt.specific_lock); + + LOG_DEBUG_BDF("add specific fnav mask---rule_info:vm_pool[%u], flow_type[0x%x]" + "vlan_id[%u], dst_ip[%x:%x:%x:%x], src_ip[%x:%x:%x:%x]" + "dst_port[%u], src_port[%u], flex_bytes[0x%x], bkt_hash[0x%x]\n", + mask->ntuple.vm_pool, + mask->ntuple.flow_type, + mask->ntuple.vlan_id, + mask->ntuple.dst_ip[0], + mask->ntuple.dst_ip[1], + mask->ntuple.dst_ip[2], + mask->ntuple.dst_ip[3], + mask->ntuple.src_ip[0], + mask->ntuple.src_ip[1], + mask->ntuple.src_ip[2], + mask->ntuple.src_ip[3], + mask->ntuple.dst_port, + mask->ntuple.src_port, + mask->ntuple.flex_bytes, + mask->ntuple.bkt_hash + ); + if (hlist_empty(&adapter->fnav_ctxt.rules_list)) { + LOG_DEBUG_BDF("new fnav mask added\n"); + memcpy(&adapter->fnav_ctxt.rules_mask, mask, sizeof(*mask)); + + ret = hw->dbu.ops->fnav_specific_rule_mask_set(hw, mask); + if (ret) { + LOG_MSG_ERR(drv, "error writing mask\n"); + goto l_err_unlock; + } + } else if (memcmp(&adapter->fnav_ctxt.rules_mask, mask, sizeof(*mask))) { + LOG_MSG_ERR(drv, "only one mask supported per port\n"); + goto l_err_unlock; + } + + sxe_fnav_specific_hash_compute(&input_rule->rule_info, mask); + + LOG_DEBUG_BDF("add specific fnav 
rule---filter:vm_pool[%u], flow_type[0x%x]" + "vlan_id[%u], dst_ip[%x:%x:%x:%x], src_ip[%x:%x:%x:%x]" + "dst_port[%u], src_port[%u], flex_bytes[0x%x], bkt_hash[0x%x]" + "sw_idx[%u], ring_cookie[0x%llx]\n", + input_rule->rule_info.ntuple.vm_pool, + input_rule->rule_info.ntuple.flow_type, + input_rule->rule_info.ntuple.vlan_id, + input_rule->rule_info.ntuple.dst_ip[0], + input_rule->rule_info.ntuple.dst_ip[1], + input_rule->rule_info.ntuple.dst_ip[2], + input_rule->rule_info.ntuple.dst_ip[3], + input_rule->rule_info.ntuple.src_ip[0], + input_rule->rule_info.ntuple.src_ip[1], + input_rule->rule_info.ntuple.src_ip[2], + input_rule->rule_info.ntuple.src_ip[3], + input_rule->rule_info.ntuple.dst_port, + input_rule->rule_info.ntuple.src_port, + input_rule->rule_info.ntuple.flex_bytes, + input_rule->rule_info.ntuple.bkt_hash, + input_rule->sw_idx, + input_rule->ring_cookie + ); + ret = hw->dbu.ops->fnav_specific_rule_add(hw, + &input_rule->rule_info, input_rule->sw_idx, queue); + if (ret) { + LOG_ERROR_BDF("set specific rule failed, ret = %d\n", ret); + goto l_err_unlock; + } + + sxe_fnav_sw_specific_rule_add(adapter, input_rule); + + spin_unlock(&adapter->fnav_ctxt.specific_lock); + return 0; + +l_err_unlock: + ret = -EINVAL; + spin_unlock(&adapter->fnav_ctxt.specific_lock); + return ret; +} + +int sxe_fnav_sw_specific_rule_del( + struct sxe_adapter *adapter, + u16 sw_idx) +{ + struct sxe_hw *hw = &adapter->hw; + struct hlist_node *next; + struct sxe_fnav_rule_node *rule = NULL; + int ret = -EINVAL; + + hlist_for_each_entry_safe(rule, next, + &adapter->fnav_ctxt.rules_list, + node) { + if (rule->sw_idx >= sw_idx) { + LOG_INFO("rule->sw_idx = %u; sw_idx = %u\n", + rule->sw_idx, sw_idx); + break; + } + } + + if (rule && (rule->sw_idx == sw_idx)) { + LOG_DEBUG_BDF("delete rule in sw_idx[%u]\n", sw_idx); + ret = hw->dbu.ops->fnav_specific_rule_del(hw, + &rule->rule_info, + sw_idx); + if (ret) { + LOG_ERROR_BDF("delete fnav rule in sw_idx[%d]" + "failed\n", sw_idx); + } + + hlist_del(&rule->node); + kfree(rule); + adapter->fnav_ctxt.rule_cnt--; + } else { + LOG_ERROR_BDF("fnav rule in sw_idx[%u] not found\n", sw_idx); + } + + return ret; +} + +u64 sxe_fnav_max_rule_num_get(u32 rules_table_size) +{ + return (u64)((SXE_FNAV_RULES_TABLE_SIZE_UNIT << + rules_table_size) - 2); +} + +struct sxe_fnav_rule_node* sxe_fnav_specific_rule_find( + struct sxe_adapter *adapter, + u32 location) +{ + struct sxe_fnav_rule_node *rule = NULL; + struct hlist_node *next; + + hlist_for_each_entry_safe(rule, next, + &adapter->fnav_ctxt.rules_list, + node) { + if (location <= rule->sw_idx) { + LOG_INFO("location = %u, sw_idx = %u\n", + location, rule->sw_idx); + break; + } + } + + if (!rule || location != rule->sw_idx) { + rule = NULL; + } + + LOG_INFO("loc[%u] rule find finish and %s\n", location, + rule != NULL ? 
"found" : "not found"); + + return rule; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_filter.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_filter.h new file mode 100644 index 000000000000..360893e76d78 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_filter.h @@ -0,0 +1,79 @@ + +#ifndef __SXE_FILTER_H__ +#define __SXE_FILTER_H__ + +#include +#include + +#include "sxe.h" + +#define SXE_DEFAULT_UC_ADDR_IDX (0) + +#define SXE_DEFAULT_MAC_POOL_IDX (0) +#define SXE_UC_ADDR_ENTRY_USED (0x1) + +#define SXE_FNAV_DEFAULT_SAMPLE_RATE (200) + +#define SXE_FNAV_RULES_TABLE_PKT_SIZE (32) + +enum sxe_fnav_rules_table_size { + SXE_FNAV_RULES_TABLE_SIZE_NONE = 0, + SXE_FNAV_RULES_TABLE_SIZE_64K = 1, + SXE_FNAV_RULES_TABLE_SIZE_128K = 2, + SXE_FNAV_RULES_TABLE_SIZE_256K = 3, +}; + +union sxe_sample_data_hdr { + unsigned char *network; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; +}; + +s32 sxe_uc_addr_add(struct sxe_hw *hw,struct sxe_uc_addr_table *uc_table, + const u8 *addr, u16 pool); + +s32 sxe_uc_addr_del(struct sxe_hw *hw,struct sxe_uc_addr_table *uc_table, + const u8 *addr, u16 pool); + +s32 sxe_mc_addr_add(struct net_device *netdev); + +s32 sxe_mac_filter_init(struct sxe_adapter *adapter); + +void sxe_mac_filter_destroy(struct sxe_adapter *adapter); + +s32 sxe_uc_sync(struct net_device *netdev, const u8 *addr); + +s32 sxe_uc_unsync(struct net_device *netdev, const u8 *addr); + +void sxe_mac_filter_reset(struct sxe_adapter *adapter); + +void sxe_mac_addr_set(struct sxe_adapter *adapter); + +void sxe_fnav_rules_restore(struct sxe_adapter *adapter); + +void sxe_fnav_rules_clean(struct sxe_adapter *adapter); + +s32 sxe_fnav_sample_rule_get(struct sxe_ring *ring, + struct sxe_tx_buffer *first_buffer); + +s32 sxe_fnav_specific_rule_add_process( + struct sxe_adapter *adapter, + struct sxe_fnav_rule_node *input_rule, + union sxe_fnav_rule_info *mask, + u8 queue); + +int sxe_fnav_sw_specific_rule_del( + struct sxe_adapter *adapter, + u16 sw_idx); + +u64 sxe_fnav_max_rule_num_get(u32 rules_table_size); + +struct sxe_fnav_rule_node* sxe_fnav_specific_rule_find( + struct sxe_adapter *adapter, + u32 location); + +void sxe_fc_mac_addr_set(struct sxe_adapter *adapter); + +void sxe_vf_mc_addr_restore(struct sxe_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_host_cli.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_host_cli.c new file mode 100644 index 000000000000..96d20f55fe0c --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_host_cli.c @@ -0,0 +1,313 @@ + +#include "sxe.h" +#include "sxe_log.h" +#include "sxe_host_cli.h" +#include "sxe_host_hdc.h" +#include "sxe_ioctl.h" + +static dev_t sxe_cdev_major; +static struct class *sxe_cdev_class; + +static struct mutex sxe_minor_lock; +static DEFINE_IDR(sxe_minor_idr); + +static s32 sxe_cli_open(struct inode *inode, struct file *filep) +{ + struct sxe_adapter *adapter; + + adapter = container_of(inode->i_cdev, + struct sxe_adapter, cdev_info.cdev); + LOG_DEBUG_BDF("open char dev of adapter[%p]\n", adapter); + filep->private_data = adapter; + + return 0; +} + +STATIC s32 sxe_cli_user_input_param_check(u64 trace_id, + u8 *in_data, u16 in_len, + u8 *out_data,u16 out_len) +{ + s32 ret = -EINVAL; + + if (in_data == NULL || out_data == NULL) { + LOG_ERROR("trace_id=0x%llx cmd paramter invalid," + "in_data=%p, out_data=%p\n", + trace_id,in_data, out_data); + goto l_out; + } + + if (in_len == 0 || out_len == 0 || in_len > HDC_CACHE_TOTAL_LEN + || out_len > HDC_CACHE_TOTAL_LEN) { + 
LOG_ERROR("trace_id=0x%llx cmd paramter invalid, " + "inLen=%d, outLen=%d\n", + trace_id, in_len, out_len); + goto l_out; + } + + return 0; +l_out: + return ret; +} + +static s32 sxe_do_cli_cmd(struct sxe_hw *hw, unsigned long arg) +{ + s32 ret = -SXE_FAILED; + u8 *in_data; + u16 in_len; + u8 *out_data; + u16 out_len; + u64 trace_id; + struct sxe_driver_cmd cmd; + struct sxe_adapter *adapter = hw->adapter; + + struct SxeIoctlSyncCmd __user *user_cmd = + (struct SxeIoctlSyncCmd __user*)arg; + + struct SxeIoctlSyncCmd *user_cmd_buf = + kzalloc(sizeof(struct SxeIoctlSyncCmd), GFP_KERNEL); + if (user_cmd_buf == NULL) { + LOG_ERROR_BDF("kzalloc user_cmd_buf mem failed\n"); + ret = -ENOMEM; + goto l_ret; + } + + if (copy_from_user(user_cmd_buf, (void __user*)user_cmd, + sizeof(struct SxeIoctlSyncCmd))) { + LOG_ERROR_BDF("hw[%p] , copy from user err\n",hw); + ret = -EFAULT; + goto l_free; + } + + in_data = user_cmd_buf->inData; + in_len = user_cmd_buf->inLen; + out_data = user_cmd_buf->outData; + out_len = user_cmd_buf->outLen; + trace_id = user_cmd_buf->traceid; + + LOG_DEBUG_BDF("get user cmd: trace_id=0x%llx," + "in_data len=%u, out_data len=%u\n", + trace_id, in_len, out_len); + ret = sxe_cli_user_input_param_check(trace_id, in_data, in_len, + out_data, out_len); + if (ret) { + goto l_free; + } + + cmd.req = in_data; + cmd.req_len = in_len; + cmd.resp = out_data; + cmd.resp_len = out_len; + cmd.trace_id = trace_id; + cmd.opcode = SXE_CMD_MAX; + cmd.is_interruptible = true; + ret = sxe_cli_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("sxe cli cmd trace_id=0x%llx" + "trans error, ret=%d\n", trace_id, ret); + goto l_free; + } + +l_free: + kfree(user_cmd_buf); + user_cmd_buf = NULL; +l_ret: + return ret; +} + +static long sxe_cli_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + long ret = -ENOTTY; + struct sxe_hw *hw; + struct sxe_adapter *adapter; + + if (filep == NULL || cmd == 0 || arg == 0) { + LOG_ERROR("filep=%p cmd=%d arg=%ld\n", filep, cmd, arg); + ret = -EINVAL; + goto l_ioctl_failed; + } + + adapter = (struct sxe_adapter *)filep->private_data; + + LOG_DEBUG_BDF("driver ioctl cmd=%x, arg=0x%lx\n", cmd, arg); + + if (adapter != NULL) { + switch(cmd) + { + case SXE_CMD_IOCTL_SYNC_CMD: + hw = &adapter->hw; + ret = sxe_do_cli_cmd(hw, arg); + break; + default: + LOG_ERROR_BDF("unknown ioctl cmd, filep=%p, cmd=%d," + "arg=0x%8.8lx\n", filep, cmd, arg); + break; + } + } else { + LOG_WARN_BDF("can found cdev\n"); + ret = -ENODEV; + goto l_ioctl_failed; + } + + if(ret) { + LOG_ERROR_BDF("filp=%p, cmd=%d, arg=%lx, ret=%ld\n", + filep, cmd, arg, ret); + goto l_ioctl_failed; + } + + return SXE_SUCCESS; + +l_ioctl_failed: + return ret; +} + +const struct file_operations sxe_cdev_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = sxe_cli_ioctl, + .open = sxe_cli_open, + .release = NULL, +}; + +STATIC void sxe_pci_addr_get(struct pci_dev *pci_dev, + struct sxe_pci_addr *pci_addr) +{ + pci_addr->domain = pci_domain_nr(pci_dev->bus); + pci_addr->bus = pci_dev->bus->number; + pci_addr->deviceNo = (((pci_dev->devfn) >> PCI_BDF_DEV_SHIFT ) & PCI_BDF_DEV_MASK); + pci_addr->devfn = ((pci_dev->devfn) & PCI_BDF_FUNC_MASK); + + return; +} + +s32 sxe_cli_cdev_register() +{ + s32 ret; + + ret = alloc_chrdev_region(&sxe_cdev_major, 0, + SXE_MAX_DEVICES_NUM, SXE_CHRDEV_NAME); + if (ret) { + LOG_ERROR("alloc cdev number failed\n"); + goto l_alloc_cdev_failed; + } + +#ifdef CLASS_CREATE_NEED_1_PARAM + sxe_cdev_class = class_create(SXE_CHRDEV_CLASS_NAME); +#else + sxe_cdev_class = 
class_create(THIS_MODULE, SXE_CHRDEV_CLASS_NAME); +#endif + if(IS_ERR(sxe_cdev_class)) { + ret = PTR_ERR(sxe_cdev_class); + LOG_ERROR("create cdev class failed\n"); + goto l_create_class_failed; + } + + mutex_init(&sxe_minor_lock); + + return SXE_SUCCESS; + +l_create_class_failed: + unregister_chrdev_region(sxe_cdev_major, SXE_MAX_DEVICES_NUM); +l_alloc_cdev_failed: + return ret; + +} + +void sxe_cli_cdev_unregister() +{ + class_destroy (sxe_cdev_class); + unregister_chrdev_region(sxe_cdev_major, SXE_MAX_DEVICES_NUM); + idr_destroy(&sxe_minor_idr); + + return; +} + +STATIC s32 sxe_get_minor(s32* dev_minor) +{ + s32 ret = -ENOMEM; + + mutex_lock(&sxe_minor_lock); + ret = idr_alloc(&sxe_minor_idr, NULL, 0, SXE_MAX_DEVICES_NUM, GFP_KERNEL); + if (ret >= 0) { + *dev_minor = ret; + ret = 0; + } + mutex_unlock(&sxe_minor_lock); + return ret; +} + +STATIC void sxe_free_minor(s32 dev_minor) +{ + mutex_lock(&sxe_minor_lock); + idr_remove(&sxe_minor_idr, dev_minor); + mutex_unlock(&sxe_minor_lock); +} + +s32 sxe_cli_cdev_create(struct sxe_adapter *adapter) +{ + s32 ret; + s32 dev_major, dev_minor; + struct sxe_pci_addr pci_addr; + + ret = sxe_get_minor(&dev_minor); + if (ret) { + LOG_ERROR("cdev minor get failed, ret=%d\n", ret); + ret = -ENOMEM; + goto l_get_minor_failed; + } + + dev_major = MAJOR(sxe_cdev_major); + adapter->cdev_info.dev_no = MKDEV(dev_major, dev_minor); + cdev_init(&adapter->cdev_info.cdev, &sxe_cdev_fops); + adapter->cdev_info.cdev.owner = THIS_MODULE; + adapter->cdev_info.cdev.ops = &sxe_cdev_fops; + + ret = cdev_add(&adapter->cdev_info.cdev, adapter->cdev_info.dev_no, 1); + if (ret) { + LOG_ERROR_BDF("failed to add cdev dev_no=%ld\n", + (unsigned long)adapter->cdev_info.dev_no); + goto l_add_cdev_failed; + } + + sxe_pci_addr_get(adapter->pdev, &pci_addr); + + adapter->cdev_info.device = device_create(sxe_cdev_class, NULL, + adapter->cdev_info.dev_no, NULL, + SXE_CHRDEV_NAME "-%04x:%02x:%02x.%x", pci_addr.domain, pci_addr.bus, + pci_addr.deviceNo, pci_addr.devfn); + if (IS_ERR(adapter->cdev_info.device)) { + ret = PTR_ERR(adapter->cdev_info.device); + LOG_ERROR_BDF("failed to create device, dev_no=%ld\n", + (unsigned long)adapter->cdev_info.dev_no); + goto l_create_dev_failed; + } + + LOG_INFO("create char dev[%p] dev_no[major:minor=%u:%u] on pci_dev[%p]" + " to net_dev[%p] belongs to class dev[%p] success\n", + &adapter->cdev_info.cdev, dev_major, dev_minor, + adapter->pdev, adapter->netdev, adapter->cdev_info.device); + + return SXE_SUCCESS; + +l_create_dev_failed: + cdev_del(&adapter->cdev_info.cdev); +l_add_cdev_failed: + sxe_free_minor(dev_minor); +l_get_minor_failed: + return ret; +} + +void sxe_cli_cdev_delete(struct sxe_adapter *adapter) +{ + s32 dev_minor; + + dev_minor = MINOR(adapter->cdev_info.dev_no); + sxe_free_minor(dev_minor); + + LOG_INFO("delete char dev[%p], dev_no[major:minor=%u:%u]\n", + &adapter->cdev_info.cdev, MAJOR(adapter->cdev_info.dev_no), dev_minor); + + device_destroy(sxe_cdev_class, adapter->cdev_info.dev_no); + cdev_del(&adapter->cdev_info.cdev); + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_host_cli.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_host_cli.h new file mode 100644 index 000000000000..2b6495f3b917 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_host_cli.h @@ -0,0 +1,33 @@ + +#ifndef __SXE_HOST_CLI_H__ +#define __SXE_HOST_CLI_H__ + +#include +#include +#include + +#include "sxe.h" +#include "sxe_cli.h" +#include "sxe_msg.h" +#include "drv_msg.h" + +#define SXE_CHRDEV_NAME "sxe-cli" +#define 
SXE_MAX_DEVICES_NUM (1U << MINORBITS) +#define SXE_CHRDEV_CLASS_NAME SXE_CHRDEV_NAME + +struct sxe_pci_addr { + s32 domain; + u8 bus; + u32 deviceNo; + u32 devfn; +}; + +s32 sxe_cli_cdev_register(void); + +void sxe_cli_cdev_unregister(void); + +s32 sxe_cli_cdev_create(struct sxe_adapter *adapter); + +void sxe_cli_cdev_delete(struct sxe_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_host_hdc.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_host_hdc.c new file mode 100644 index 000000000000..516013d8f7e6 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_host_hdc.c @@ -0,0 +1,982 @@ + +#include +#include +#include +#include + +#include "sxe.h" +#ifndef NO_NEED_SIGNAL_H +#include +#endif +#include "sxe_host_hdc.h" +#include "sxe_log.h" +#include "sxe_hw.h" +#include "sxe_msg.h" +#include "drv_msg.h" + + +static atomic_t hdc_available = ATOMIC_INIT(1); + +static DEFINE_PER_CPU(union sxe_trace_info, sxe_trace_id); + +#define TRACE_ID_CHIP_OUT_COUNT_MASK 0x000FFFFFFFFFFFFFLLU +#define TRACE_ID_CHIP_OUT_CPUID_MASK 0x7FFLLU + +#define SXE_HDC_RETRY_CNT (250) +#define SXE_HDC_RETRY_ITR (10) + +#define NS_TO_MS_UNIT (1000000) + +#ifdef DEFINE_SEMAPHORE_NEED_CNT +DEFINE_SEMAPHORE(g_hdc_sema, 1); +#else +DEFINE_SEMAPHORE(g_hdc_sema); +#endif + +static void sxe_trace_id_alloc(u64 *trace_id) +{ + union sxe_trace_info *id = NULL; + u64 trace_id_count = 0; + + preempt_disable(); + id = this_cpu_ptr(&sxe_trace_id); + + trace_id_count = id->sxe_trace_id_param.count; + ++trace_id_count; + id->sxe_trace_id_param.count = (trace_id_count & TRACE_ID_CHIP_OUT_COUNT_MASK); + + *trace_id = id->trace_id; + preempt_enable(); + + return; +} + +static void sxe_trace_id_init(void) +{ + s32 cpu = 0; + union sxe_trace_info *id = NULL; + + for_each_possible_cpu(cpu) { + id = &per_cpu(sxe_trace_id, cpu); + id->sxe_trace_id_param.cpu_id = + (cpu & TRACE_ID_CHIP_OUT_CPUID_MASK); + id->sxe_trace_id_param.count = 0; + } + + return; +} + +static s32 sxe_cli_fw_time_sync(struct sxe_adapter *adapter) +{ + s32 ret; + struct sxe_driver_cmd cmd; + u64 timestamp = ktime_get_real_ns(); + struct sxe_hw *hw = &adapter->hw; + + timestamp = timestamp / NS_TO_MS_UNIT; + LOG_INFO_BDF("sync time= %llu ms\n", timestamp); + + cmd.req = ×tamp; + cmd.req_len = sizeof(timestamp); + cmd.resp = NULL; + cmd.resp_len = 0; + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_TINE_SYNC; + cmd.is_interruptible = true; + ret = sxe_driver_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:time sync," + "failed count=%u\n", + ret, adapter->hdc_ctxt.time_sync_failed); + adapter->hdc_ctxt.time_sync_failed++; + } + + return ret; +} + +s32 sxe_host_to_fw_time_sync(struct sxe_adapter *adapter) +{ + s32 ret = 0; + s32 ret_v; + u32 status; + struct sxe_hw *hw = &adapter->hw; + + status = hw->hdc.ops->fw_status_get(hw); + if (status != SXE_FW_START_STATE_FINISHED) { + LOG_ERROR_BDF("fw[%p] status[0x%x] is not good," + "and time_sync_failed=%u\n", + hw, status, adapter->hdc_ctxt.time_sync_failed); + adapter->hdc_ctxt.time_sync_failed++; + ret = -SXE_FW_STATUS_ERR; + goto l_ret; + } + + ret_v = sxe_cli_fw_time_sync(adapter); + if (ret_v) { + LOG_WARN_BDF("fw time sync failed, ret_v=%d\n",ret_v); + goto l_ret; + } + +l_ret: + return ret; +} + +void sxe_time_sync_handler(struct work_struct *work) +{ + s32 ret; + struct sxe_adapter *adapter = container_of(work, + struct sxe_adapter, hdc_ctxt.time_sync_work); + ret = sxe_host_to_fw_time_sync(adapter); + if (ret) { + LOG_ERROR_BDF("time sync handler 
err, ret=%d\n", ret); + } + + return; +} + +struct semaphore *sxe_hdc_sema_get() +{ + return &g_hdc_sema; +} + +void sxe_hdc_available_set(s32 value) +{ + atomic_set(&hdc_available, value); + return; +} + +void sxe_hdc_channel_init(struct sxe_hdc_context *hdc_ctxt) +{ + sxe_trace_id_init(); + + init_completion(&hdc_ctxt->sync_done); + + INIT_WORK(&hdc_ctxt->time_sync_work, sxe_time_sync_handler); + + sxe_hdc_available_set(1); + hdc_ctxt->time_sync_failed = 0; + return; +} + +void sxe_hdc_channel_destroy(struct sxe_hw *hw) +{ + sxe_hdc_available_set(0); + hw->hdc.ops->resource_clean(hw); + return; +} + +static inline s32 sxe_hdc_lock_get(struct sxe_hw *hw) +{ + s32 ret = SXE_HDC_FALSE; + struct sxe_adapter *adapter = hw->adapter; + + if (atomic_read(&hdc_available)) { + ret = hw->hdc.ops->pf_lock_get(hw, SXE_HDC_TRYLOCK_MAX); + } else { + LOG_ERROR_BDF("hdc channel not available\n"); + } + + return ret; +} + +static inline void sxe_hdc_lock_release(struct sxe_hw *hw) +{ + hw->hdc.ops->pf_lock_release(hw, SXE_HDC_RELEASELOCK_MAX); + return; +} + +static inline s32 sxe_poll_fw_ack(struct sxe_hw *hw, u32 timeout, + bool is_interruptible) +{ + s32 ret = 0; + u32 i; + bool fw_ov = false; + struct sxe_adapter *adapter = hw->adapter; + + if (atomic_read(&hdc_available)) { + for (i = 0; i < timeout; i++) { + fw_ov = hw->hdc.ops->is_fw_over_set(hw); + if (fw_ov) { + break; + } + + if (is_interruptible) { + if (msleep_interruptible(SXE_HDC_RETRY_ITR)){ + ret = -EINTR; + LOG_DEV_INFO("interrupted, exit polling\n"); + goto l_ret; + } + } else { + msleep(SXE_HDC_RETRY_ITR); + } + } + + if (i >= timeout) { + LOG_ERROR_BDF("poll fw_ov timeout...\n"); + ret = -SXE_ERR_HDC_FW_OV_TIMEOUT; + goto l_ret; + } + + hw->hdc.ops->fw_ov_clear(hw); + ret = 0; + } else { + ret = SXE_HDC_FALSE; + LOG_ERROR_BDF("hdc channel not available\n"); + } + +l_ret: + return ret; +} + +#ifdef SXE_NEED_PROCESS_CANCEL +static inline bool is_interrupt_signal(struct task_struct *task) +{ + bool is_inter = false; + + if (sigismember(&task->pending.signal, SIGINT) || + sigismember(&task->pending.signal, SIGKILL) || + sigismember(&task->pending.signal, SIGQUIT)) { + is_inter = true; + goto l_ret; + } + +l_ret: + return is_inter; +} +#endif + +void sxe_hdc_irq_handler(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + u32 irq_event = hw->hdc.ops->irq_event_get(hw); + + hw->irq.ops->specific_irq_disable(hw, SXE_EIMC_HDC); + + LOG_DEBUG_BDF("hdc irq interrupt coming\n"); + if (irq_event & MSI_EVT_HDC_FWOV) { + LOG_DEBUG_BDF("hdc fw over event occur\n"); + hw->hdc.ops->irq_event_clear(hw, MSI_EVT_HDC_FWOV); + + hw->hdc.ops->fw_ov_clear(hw); + complete(&adapter->hdc_ctxt.sync_done); + } + + if (irq_event & MSI_EVT_HDC_TIME_SYNC) { + LOG_DEBUG_BDF("hdc fw sync time event occur\n"); + + hw->hdc.ops->irq_event_clear(hw, MSI_EVT_HDC_TIME_SYNC); + schedule_work(&adapter->hdc_ctxt.time_sync_work); + } + + return; +} + +static s32 sxe_wait_fw_ack(struct sxe_hw *hw, u64 trace_id) +{ + s32 ret; + struct sxe_adapter *adapter = container_of(hw, struct sxe_adapter, hw); + + while(1) { + ret = wait_for_completion_interruptible(&adapter->hdc_ctxt.sync_done); + if (ret == 0) { + LOG_DEBUG_BDF("cmd trace=0x%llx, " + "wait_for_completion_interrupted success\n", + trace_id); + break; + } + + ret = signal_pending(current); + if(!ret) { + LOG_DEBUG_BDF("cmd trace=0x%llx, no pending signal," + "continue wait",trace_id); + continue; + } else { + LOG_DEBUG_BDF("cmd trace=0x%llx got signal, default quit\n", + trace_id); + ret = -EINTR; + break; + 
} + +#ifdef SXE_NEED_PROCESS_CANCEL + ret = is_interrupt_signal(current); + if (ret) { + LOG_DEBUG_BDF("cmd trace=0x%llx interrupted, need cancel\n", + trace_id); + ret = -EINTR; + break; + } else { + LOG_DEBUG_BDF("cmd trace=0x%llx got other signal, ignore\n", + trace_id); + } +#endif + } + + return ret; +} + +static s32 hdc_packet_ack_get(struct sxe_hw *hw, u64 trace_id, + HdcHeader_u *pkt_header, + bool use_msi, bool is_interruptible) +{ + s32 ret = 0; + u32 timeout = SXE_HDC_WAIT_TIME; + struct sxe_adapter *adapter = hw->adapter; + + pkt_header->dw0 = 0; + pkt_header->head.errCode = PKG_ERR_OTHER; + + LOG_INFO_BDF("trace_id=0x%llx hdc cmd ack get start, mode=%s\n", + trace_id, use_msi ? "msi inter":"polling"); + if (use_msi) { + ret = sxe_wait_fw_ack(hw, trace_id); + }else { + ret = sxe_poll_fw_ack(hw, timeout, is_interruptible); + } + + if (ret) { + LOG_ERROR_BDF("get fw ack failed, mode=%s ret=%d\n", + use_msi ? "msi inter":"polling", ret); + goto l_out; + } + + pkt_header->dw0 = hw->hdc.ops->fw_ack_header_rcv(hw);; + if (pkt_header->head.errCode == PKG_ERR_PKG_SKIP) { + ret = -SXE_HDC_PKG_SKIP_ERR; + goto l_out; + } else if (pkt_header->head.errCode != PKG_OK) { + ret = -SXE_HDC_PKG_OTHER_ERR; + goto l_out; + } + +l_out: + LOG_INFO_BDF("trace_id=0x%llx hdc cmd ack get end ret=%d\n", trace_id, ret); + return ret; +} + +static void hdc_packet_header_fill(HdcHeader_u *pkt_header, + u8 pkt_index, u16 total_len, + u16 pkt_num, u8 is_read, bool use_msi) +{ + U16 pkt_len = 0; + + pkt_header->dw0 = 0; + + pkt_header->head.pid = (is_read == 0) ? pkt_index : (pkt_index - 1); + + pkt_header->head.totalLen = SXE_HDC_LEN_TO_REG(total_len); + + if (pkt_index == 0 && is_read == 0) { + pkt_header->head.startPkg = SXE_HDC_BIT_1; + } + + if (pkt_index == (pkt_num - 1)) { + pkt_header->head.endPkg = SXE_HDC_BIT_1; + pkt_len = total_len - (DWORD_NUM * (pkt_num - 1)); + } else { + pkt_len = DWORD_NUM; + } + + pkt_header->head.len = SXE_HDC_LEN_TO_REG(pkt_len); + pkt_header->head.isRd = is_read; + if (use_msi) { + pkt_header->head.msi = 1; + } + + return ; +} + +static inline void hdc_channel_clear(struct sxe_hw *hw) +{ + hw->hdc.ops->fw_ov_clear(hw); +} + +static inline void hdc_packet_send_done(struct sxe_hw *hw) +{ + hw->hdc.ops->packet_send_done(hw); + return; +} + +static inline void hdc_packet_header_send(struct sxe_hw *hw, + u32 header) +{ + hw->hdc.ops->packet_header_send(hw, header); + return; +} + +static inline void hdc_packet_data_dword_send(struct sxe_hw *hw, + u16 dword_index, u32 value) +{ + hw->hdc.ops->packet_data_dword_send(hw, dword_index, value); + return; +} + +static void hdc_packet_send(struct sxe_hw *hw, u64 trace_id, + HdcHeader_u *pkt_header, u8 *data, + u16 data_len) +{ + u16 dw_idx = 0; + u16 pkt_len = 0; + u16 offset = 0; + u32 pkg_data = 0; + struct sxe_adapter *adapter = hw->adapter; + + LOG_DEBUG_BDF("hw_addr[%p] trace_id=0x%llx send pkt pkg_header[0x%x], " + "data_addr[%p], data_len[%u]\n", + hw, trace_id, pkt_header->dw0, data, data_len); + + hdc_packet_header_send(hw, pkt_header->dw0); + + if (data == NULL || data_len == 0) { + goto l_send_done; + } + + pkt_len = SXE_HDC_LEN_FROM_REG(pkt_header->head.len); + for (dw_idx = 0; dw_idx < pkt_len; dw_idx++) { + pkg_data = 0; + + offset = dw_idx * BYTE_PER_DWORD; + + if ((pkt_header->head.endPkg == SXE_HDC_BIT_1) + && (dw_idx == (pkt_len - 1)) + && (data_len % BYTE_PER_DWORD != 0)) { + memcpy((u8 *)&pkg_data, data + offset, + data_len % BYTE_PER_DWORD); + } else { + pkg_data = *(u32 *)(data + offset); + } + + 
LOG_DEBUG_BDF("trace_id=0x%llx send data to reg[%u] dword[0x%x]\n", + trace_id, dw_idx, pkg_data); + hdc_packet_data_dword_send(hw, dw_idx, pkg_data); + } + +l_send_done: + hdc_channel_clear(hw); + + hdc_packet_send_done(hw); + + return; +} + +static inline u32 hdc_packet_data_dword_rcv(struct sxe_hw *hw, + u16 dword_index) +{ + return hw->hdc.ops->packet_data_dword_rcv(hw, dword_index); +} + +static void hdc_resp_data_rcv(struct sxe_hw *hw, u64 trace_id, + HdcHeader_u *pkt_header, u8 *out_data, + u16 out_len) +{ + u16 dw_idx = 0; + u16 dw_num = 0; + u16 offset = 0; + u32 pkt_data; + struct sxe_adapter *adapter = hw->adapter; + + dw_num = SXE_HDC_LEN_FROM_REG(pkt_header->head.len); + for (dw_idx = 0; dw_idx < dw_num; dw_idx++) { + pkt_data= hdc_packet_data_dword_rcv(hw, dw_idx); + offset = dw_idx * BYTE_PER_DWORD; + LOG_DEBUG_BDF("trace_id=0x%llx get data from reg[%u] dword=0x%x\n", + trace_id, dw_idx, pkt_data); + + if ((pkt_header->head.endPkg == SXE_HDC_BIT_1) + && (dw_idx == (dw_num - 1)) && (out_len % BYTE_PER_DWORD != 0)) { + memcpy(out_data + offset, (u8 *)&pkt_data, + out_len % BYTE_PER_DWORD); + } else { + *(u32 *)(out_data + offset) = pkt_data; + } + } + + return; +} + +STATIC s32 hdc_req_process(struct sxe_hw *hw, u64 trace_id, + u8 *in_data, u16 in_len, bool use_msi, bool is_interruptible) +{ + s32 ret = 0; + u32 total_len = 0; + u16 pkt_num = 0; + u16 index = 0; + u16 offset = 0; + HdcHeader_u pkt_header; + bool is_retry = false; + struct sxe_adapter *adapter = hw->adapter; + + total_len = (in_len + BYTE_PER_DWORD - 1) / BYTE_PER_DWORD; + + pkt_num = (in_len + ONE_PACKET_LEN_MAX - 1) / ONE_PACKET_LEN_MAX; + LOG_DEBUG_BDF("hw[%p] trace_id=0x%llx req in_data[%p] in_len=%u, " + "total_len=%uDWORD, pkt_num = %u, mode=%s\n", + hw, trace_id, in_data, in_len, total_len, + pkt_num, use_msi ? "msi":"polling"); + + for (index = 0; index < pkt_num; index++) { + LOG_DEBUG_BDF("trace_id=0x%llx fill pkg header[%p], pkg_index[%u], " + "total_Len[%u], pkg_num[%u], is_read[no]\n", + trace_id, &pkt_header, index, total_len, pkt_num); + hdc_packet_header_fill(&pkt_header, index, total_len, + pkt_num, 0, use_msi); + + offset = index * DWORD_NUM * BYTE_PER_DWORD; + hdc_packet_send(hw, trace_id, &pkt_header, + in_data + offset, in_len); + + if (index == pkt_num - 1) { + break; + } + + ret = hdc_packet_ack_get(hw, trace_id, &pkt_header, + use_msi, is_interruptible); + if (ret == -EINTR) { + LOG_ERROR_BDF("hdc cmd trace_id=0x%llx interrupted\n", trace_id); + goto l_out; + } else if (ret == -SXE_HDC_PKG_SKIP_ERR) { + LOG_ERROR_BDF("hdc cmd trace_id=0x%llx req ack" + "failed, retry\n", trace_id); + if (is_retry) { + ret = -SXE_HDC_RETRY_ERR; + goto l_out; + } + + index --; + is_retry = true; + continue; + } else if (ret != SXE_HDC_SUCCESS) { + LOG_ERROR_BDF("hdc cmd trace_id=0x%llx req ack" + "failed, ret=%d\n", trace_id, ret); + ret = -SXE_HDC_RETRY_ERR; + goto l_out; + } + + LOG_DEBUG_BDF("hdc cmd trace_id=0x%llx get req packet_index[%u]" + " ack succeed header[0x%x]\n", + trace_id, index, pkt_header.dw0); + is_retry = false; + } + +l_out: + return ret; +} + +static s32 hdc_resp_process(struct sxe_hw *hw, u64 trace_id, + u8 *out_data, u16 out_len, bool use_msi, bool is_interruptible) +{ + s32 ret; + u32 req_dwords; + u32 resp_len; + u32 resp_dwords; + u16 pkt_num; + u16 index; + u16 offset; + HdcHeader_u pkt_header; + bool retry = false; + struct sxe_adapter *adapter = hw->adapter; + + LOG_INFO_BDF("hdc trace_id=0x%llx req's last cmd ack get, mode=%s\n", + trace_id, use_msi ? 
"msi" : "polling"); + ret = hdc_packet_ack_get(hw, trace_id, &pkt_header, + use_msi, is_interruptible); + if (ret == -EINTR) { + LOG_ERROR_BDF("hdc cmd trace_id=0x%llx interrupted\n", trace_id); + goto l_out; + } else if(ret) { + LOG_ERROR_BDF("hdc trace_id=0x%llx ack get failed, ret=%d\n", + trace_id, ret); + ret = -SXE_HDC_RETRY_ERR; + goto l_out; + } + + LOG_INFO_BDF("hdc trace_id=0x%llx req's last cmd ack get" + "succeed header[0x%x]\n",trace_id, pkt_header.dw0); + + if (!pkt_header.head.startPkg) { + ret = -SXE_HDC_RETRY_ERR; + LOG_ERROR_BDF("trace_id=0x%llx ack header has error: " + "not set start bit\n",trace_id); + goto l_out; + } + + req_dwords = (out_len + BYTE_PER_DWORD - 1) / BYTE_PER_DWORD; + resp_dwords = SXE_HDC_LEN_FROM_REG(pkt_header.head.totalLen); + if (resp_dwords > req_dwords) { + ret = -SXE_HDC_RETRY_ERR; + LOG_ERROR_BDF("trace_id=0x%llx rsv len check failed:" + "resp_dwords=%u, req_dwords=%u\n",trace_id, + resp_dwords, req_dwords); + goto l_out; + } + + resp_len = resp_dwords << DWORD_TO_BYTE_SHIFT; + LOG_INFO_BDF("outlen = %u bytes, resp_len = %u bytes\n", out_len, resp_len); + if (resp_len > out_len) { + resp_len = out_len; + } + + hdc_resp_data_rcv(hw, trace_id, &pkt_header, out_data, resp_len); + + pkt_num = (resp_len + ONE_PACKET_LEN_MAX - 1) / ONE_PACKET_LEN_MAX; + for (index = 1; index < pkt_num; index++) { + LOG_DEBUG_BDF("trace_id=0x%llx fill pkg header[%p], pkg_index[%u], " + "total_Len[%u], pkg_num[%u], is_read[yes] use_msi=%s\n", + trace_id, &pkt_header, index, resp_dwords, + pkt_num, use_msi ? "yes" : "no"); + hdc_packet_header_fill(&pkt_header, index, resp_dwords, + pkt_num, 1, use_msi); + + hdc_packet_send(hw, trace_id, &pkt_header, NULL, 0); + + ret = hdc_packet_ack_get(hw, trace_id, &pkt_header, + use_msi, is_interruptible); + if (ret == -EINTR) { + LOG_ERROR_BDF("hdc cmd trace_id=0x%llx interrupted\n", trace_id); + goto l_out; + } else if (ret == -SXE_HDC_PKG_SKIP_ERR) { + LOG_ERROR_BDF("trace_id=0x%llx hdc resp ack polling" + "failed, ret=%d\n", trace_id, ret); + if (retry) { + ret = -SXE_HDC_RETRY_ERR; + goto l_out; + } + + index --; + retry = true; + continue; + } else if (ret != SXE_HDC_SUCCESS) { + LOG_ERROR_BDF("trace_id=0x%llx hdc resp ack polling" + "failed, ret=%d\n",trace_id, ret); + ret = -SXE_HDC_RETRY_ERR; + goto l_out; + } + + LOG_INFO_BDF("hdc trace_id=0x%llx resp pkt[%u] get " + "succeed header[0x%x]\n", + trace_id, index, pkt_header.dw0); + + retry = false; + + offset = index * DWORD_NUM * BYTE_PER_DWORD; + hdc_resp_data_rcv(hw, trace_id, &pkt_header, + out_data + offset, resp_len); + } + +l_out: + return ret; +} + +static s32 sxe_hdc_packet_trans(struct sxe_hw *hw, u64 trace_id, + struct sxe_hdc_trans_info *trans_info, + bool use_msi, bool is_interruptible) +{ + s32 ret = SXE_SUCCESS; + u32 status; + struct sxe_adapter *adapter = hw->adapter; + u32 channel_state; + + status = hw->hdc.ops->fw_status_get(hw); + if (status != SXE_FW_START_STATE_FINISHED) { + LOG_ERROR_BDF("fw[%p] status[0x%x] is not good\n",hw, status); + ret = -SXE_FW_STATUS_ERR; + goto l_ret; + } + + channel_state = hw->hdc.ops->channel_state_get(hw); + if (channel_state != SXE_FW_HDC_TRANSACTION_IDLE) { + LOG_ERROR_BDF("hdc channel state is busy\n"); + ret = -SXE_HDC_RETRY_ERR; + goto l_ret; + } + + ret = sxe_hdc_lock_get(hw); + if (ret) { + LOG_ERROR_BDF("hw[%p] cmd trace_id=0x%llx get hdc lock fail, ret=%d\n", + hw, trace_id, ret); + ret = -SXE_HDC_RETRY_ERR; + goto l_ret; + } + + ret = hdc_req_process(hw, trace_id, trans_info->in.data, + trans_info->in.len, 
use_msi, is_interruptible); + if (ret) { + LOG_ERROR_BDF("hdc cmd trace_id=0x%llx req process" + "failed, ret=%d\n",trace_id, ret); + goto l_hdc_lock_release; + } + + ret = hdc_resp_process(hw, trace_id, trans_info->out.data, + trans_info->out.len, use_msi, is_interruptible); + if (ret) { + LOG_ERROR_BDF("hdc cmd trace_id=0x%llx resp process" + "failed, ret=%d\n",trace_id, ret); + } + +l_hdc_lock_release: + sxe_hdc_lock_release(hw); +l_ret: + return ret; +} + +STATIC s32 sxe_hdc_cmd_process(struct sxe_hw *hw, u64 trace_id, + struct sxe_hdc_trans_info *trans_info, + bool use_msi, bool is_interruptible) +{ + s32 ret; + u8 retry_idx; + struct sxe_adapter *adapter = hw->adapter; + + LOG_DEBUG_BDF("hw[%p] %s cmd trace=0x%llx get use sema = %p, count=%u\n",hw, + use_msi ? "driver" : "user", trace_id, + sxe_hdc_sema_get(), sxe_hdc_sema_get()->count); + if (is_interruptible) { + ret = down_interruptible(sxe_hdc_sema_get()); + if (ret) { + ret = -EINTR; + LOG_WARN_BDF("hw[%p] hdc concurrency full\n", hw); + goto l_ret; + } + } else { + down(sxe_hdc_sema_get()); + } + + for (retry_idx = 0; retry_idx < SXE_HDC_RETRY_CNT; retry_idx++ ) { + ret = sxe_hdc_packet_trans(hw, trace_id, trans_info, + use_msi, is_interruptible); + if (ret == SXE_SUCCESS) { + goto l_up; + } else if (ret == -SXE_HDC_RETRY_ERR) { + if (is_interruptible) { + if (msleep_interruptible(SXE_HDC_RETRY_ITR)) { + ret = -EINTR; + LOG_ERROR_BDF("interrupted, exit polling\n"); + goto l_up; + } + } else { + msleep(SXE_HDC_RETRY_ITR); + } + + continue; + } else { + LOG_ERROR_BDF("sxe hdc packet trace_id=0x%llx" + "trans error, ret=%d\n", trace_id, ret); + ret = -EFAULT; + goto l_up; + } + } + +l_up: + LOG_DEBUG_BDF("hw[%p] %s cmd trace=0x%llx up sema = %p, count=%u\n",hw, + use_msi ? "driver" : "user", trace_id, + sxe_hdc_sema_get(), sxe_hdc_sema_get()->count); + up(sxe_hdc_sema_get()); +l_ret: + if (ret == -SXE_HDC_RETRY_ERR) { + ret = -EFAULT; + } + return ret; +} + +static void sxe_cmd_hdr_init(struct sxe_hdc_cmd_hdr *cmd_hdr, + u8 cmd_type) +{ + cmd_hdr->cmd_type = cmd_type; + cmd_hdr->cmd_sub_type = 0; + return; +} + +static void sxe_driver_cmd_msg_init(struct sxe_hdc_drv_cmd_msg *msg, + u16 opcode, u64 trace_id, + void *req_data, u16 req_len) +{ + LOG_DEBUG("cmd[opcode=0x%x], trace=0x%llx, req_data_len=%u start init\n", + opcode, trace_id, req_len); + msg->opcode = opcode; + msg->length.req_len = SXE_HDC_MSG_HDR_SIZE + req_len; + msg->traceid = trace_id; + + if (req_data && req_len != 0) { + memcpy(msg->body, (u8 *)req_data, req_len); + } + + return; +} + +static void sxe_hdc_trans_info_init( + struct sxe_hdc_trans_info *trans_info, + u8 *in_data_buf, u16 in_len, + u8 *out_data_buf, u16 out_len) +{ + trans_info->in.data = in_data_buf; + trans_info->in.len = in_len; + trans_info->out.data = out_data_buf; + trans_info->out.len = out_len; + return; +} + +s32 sxe_driver_cmd_trans(struct sxe_hw *hw, struct sxe_driver_cmd *cmd) +{ + s32 ret = SXE_SUCCESS; + struct sxe_hdc_cmd_hdr *cmd_hdr; + struct sxe_hdc_drv_cmd_msg *msg; + struct sxe_hdc_drv_cmd_msg *ack; + struct sxe_hdc_trans_info trans_info; + struct sxe_adapter *adapter = hw->adapter; + void *req_data = cmd->req, *resp_data = cmd->resp; + u16 opcode = cmd->opcode, req_len = cmd->req_len, resp_len = cmd->resp_len; + + u8 *in_data_buf; + u8 *out_data_buf; + u16 in_len; + u16 out_len; + u64 trace_id = 0; + u16 ack_data_len; + + in_len = SXE_HDC_CMD_HDR_SIZE + SXE_HDC_MSG_HDR_SIZE + req_len; + out_len = SXE_HDC_CMD_HDR_SIZE + SXE_HDC_MSG_HDR_SIZE + resp_len; + + 
sxe_trace_id_alloc(&trace_id); + + in_data_buf = kzalloc(in_len, GFP_KERNEL); + if (in_data_buf == NULL) { + LOG_ERROR_BDF("cmd trace_id=0x%llx kzalloc indata" + "mem len[%u] failed\n",trace_id, in_len); + ret = -ENOMEM; + goto l_ret; + } + + out_data_buf = kzalloc(out_len, GFP_KERNEL); + if (out_data_buf == NULL) { + LOG_ERROR_BDF("cmd trace_id=0x%llx kzalloc out_data" + "mem len[%u] failed\n",trace_id, out_len); + ret = -ENOMEM; + goto l_in_buf_free; + } + + cmd_hdr = (struct sxe_hdc_cmd_hdr *)in_data_buf; + sxe_cmd_hdr_init(cmd_hdr, SXE_CMD_TYPE_DRV); + + msg = (struct sxe_hdc_drv_cmd_msg *)((u8 *)in_data_buf + SXE_HDC_CMD_HDR_SIZE); + sxe_driver_cmd_msg_init(msg, opcode, trace_id, req_data, req_len); + + LOG_DEBUG_BDF("trans drv cmd:trace_id=0x%llx, opcode[0x%x], " + "inlen=%u, out_len=%u\n", + trace_id, opcode, in_len, out_len); + + sxe_hdc_trans_info_init(&trans_info, + in_data_buf, in_len, + out_data_buf, out_len); + + ret = sxe_hdc_cmd_process(hw, trace_id, &trans_info, false, + cmd->is_interruptible); + if (ret) { + LOG_DEV_DEBUG("hdc cmd[0x%x] trace_id=0x%llx process" + "failed, ret=%d\n",opcode, trace_id, ret); + goto l_out_buf_free; + } + + ack = (struct sxe_hdc_drv_cmd_msg *)((u8 *)out_data_buf + SXE_HDC_CMD_HDR_SIZE); + + if (ack->errcode) { + LOG_DEV_DEBUG("driver get hdc ack failed trace_id=0x%llx, err=%d\n", + trace_id, ack->errcode); + ret = -SXE_ERR_CLI_FAILED; + goto l_out_buf_free; + } + + ack_data_len = ack->length.ack_len - SXE_HDC_MSG_HDR_SIZE; + if (resp_len != ack_data_len) { + LOG_DEV_DEBUG("ack trace_id=0x%llx data len[%u]" + " and resp_len[%u] dont match\n", + trace_id, ack_data_len, resp_len); + ret = -SXE_ERR_CLI_FAILED; + goto l_out_buf_free; + } + + if (resp_len != 0) { + memcpy(resp_data, ack->body, resp_len); + } + + LOG_DEBUG_BDF("driver get hdc ack trace_id=0x%llx," + "ack_len=%u, ack_data_len=%u\n", + trace_id, ack->length.ack_len, ack_data_len); + +l_out_buf_free: + kfree(out_data_buf); +l_in_buf_free: + kfree(in_data_buf); +l_ret: + return ret; +} + +s32 sxe_cli_cmd_trans(struct sxe_hw *hw, struct sxe_driver_cmd *cmd) +{ + s32 ret = SXE_SUCCESS; + struct sxe_hdc_cmd_hdr *cmd_hdr; + struct sxe_hdc_trans_info trans_info; + struct sxe_adapter *adapter = hw->adapter; + u64 trace_id = cmd->trace_id; + u16 in_len = cmd->req_len, out_len = cmd->resp_len; + u8 *in_data = cmd->req; + u8 *out_data = cmd->resp; + + u8 *in_data_buf; + u8 *out_data_buf; + u16 in_buf_len = in_len + SXE_HDC_CMD_HDR_SIZE; + u16 out_buf_len = out_len + SXE_HDC_CMD_HDR_SIZE; + + in_data_buf = kzalloc(in_buf_len, GFP_KERNEL); + if (in_data_buf == NULL) { + LOG_ERROR_BDF("cmd trace_id=0x%llx kzalloc indata" + "mem len[%u] failed\n",trace_id, in_buf_len); + ret = -ENOMEM; + goto l_ret; + } + + out_data_buf = kzalloc(out_buf_len, GFP_KERNEL); + if (out_data_buf == NULL) { + LOG_ERROR_BDF("cmd trace_id=0x%llx kzalloc out_data" + "mem len[%u] failed\n",trace_id, out_buf_len); + ret = -ENOMEM; + goto l_in_buf_free; + } + + if (copy_from_user(in_data_buf + SXE_HDC_CMD_HDR_SIZE, + (void __user*)in_data, in_len)) { + LOG_ERROR_BDF("hw[%p] cmd trace_id=0x%llx copy from user err\n", + hw, trace_id); + ret = -EFAULT; + goto l_out_buf_free; + } + + cmd_hdr = (struct sxe_hdc_cmd_hdr *)in_data_buf; + sxe_cmd_hdr_init(cmd_hdr, SXE_CMD_TYPE_CLI); + + LOG_DEBUG_BDF("trans cli cmd:trace_id=0x%llx,, inlen=%u, out_len=%u\n", + trace_id, in_len, out_len); + sxe_hdc_trans_info_init(&trans_info, + in_data_buf, in_buf_len, + out_data_buf, out_buf_len); + + ret = sxe_hdc_cmd_process(hw, trace_id, &trans_info, 
false, + cmd->is_interruptible); + if (ret) { + LOG_DEV_DEBUG("hdc cmd trace_id=0x%llx hdc packet trans" + "failed, ret=%d\n",trace_id, ret); + goto l_out_buf_free; + } + + if (copy_to_user((void __user*)out_data, + out_data_buf + SXE_HDC_CMD_HDR_SIZE, out_len)) { + LOG_ERROR_BDF("hw[%p] cmd trace_id=0x%llx copy to user err\n", + hw, trace_id); + ret = -EFAULT; + } + +l_out_buf_free: + kfree(out_data_buf); +l_in_buf_free: + kfree(in_data_buf); +l_ret: + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_host_hdc.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_host_hdc.h new file mode 100644 index 000000000000..4c8b4d623259 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_host_hdc.h @@ -0,0 +1,82 @@ +#ifndef __SXE_HOST_HDC_H__ +#define __SXE_HOST_HDC_H__ + +#include "sxe_hdc.h" +#include "sxe_hw.h" +#include "sxe.h" + +#define SXE_SUCCESS (0) +#define SXE_FAILED (512) + +#define SXE_HDC_SUCCESS 0 +#define SXE_HDC_FALSE SXE_ERR_HDC(1) +#define SXE_HDC_INVAL_PARAM SXE_ERR_HDC(2) +#define SXE_HDC_BUSY SXE_ERR_HDC(3) +#define SXE_HDC_FW_OPS_FAILED SXE_ERR_HDC(4) +#define SXE_HDC_FW_OV_TIMEOUT SXE_ERR_HDC(5) +#define SXE_HDC_REQ_ACK_HEAD_ERR SXE_ERR_HDC(6) +#define SXE_HDC_REQ_ACK_TLEN_ERR SXE_ERR_HDC(7) +#define SXE_HDC_PKG_SKIP_ERR SXE_ERR_HDC(8) +#define SXE_HDC_PKG_OTHER_ERR SXE_ERR_HDC(9) +#define SXE_HDC_RETRY_ERR SXE_ERR_HDC(10) +#define SXE_FW_STATUS_ERR SXE_ERR_HDC(11) + +#define SXE_HDC_TRYLOCK_MAX 200 + +#define SXE_HDC_RELEASELOCK_MAX 20 + +#define SXE_HDC_TEST_POLL_LOCK_MAX 10 +#define SXE_HDC_WAIT_TIME 200 + +#define SXE_HDC_BIT_1 0x1 + +#define BYTE_PER_DWORD (4) +#define DWORD_TO_BYTE_SHIFT (2) + +union sxe_trace_info { + u64 trace_id; + struct { + U64 count : 53; + U64 cpu_id : 11; + } sxe_trace_id_param; +}; + +struct sxe_hdc_data_info { + u8 *data; + u16 len; +}; + +struct sxe_hdc_trans_info { + struct sxe_hdc_data_info in; + struct sxe_hdc_data_info out; +}; + +struct sxe_driver_cmd { + void *req; + void *resp; + u64 trace_id; + bool is_interruptible; + u16 opcode; + u16 req_len; + u16 resp_len; +}; + +s32 sxe_driver_cmd_trans(struct sxe_hw *hw, struct sxe_driver_cmd *cmd); + +s32 sxe_cli_cmd_trans(struct sxe_hw *hw, struct sxe_driver_cmd *cmd); + +void sxe_hdc_channel_init(struct sxe_hdc_context *hdc_ctxt); + +struct semaphore *sxe_hdc_sema_get(void); + +void sxe_hdc_irq_handler(struct sxe_adapter *adapter); + +s32 sxe_host_to_fw_time_sync(struct sxe_adapter *adapter); + +void sxe_hdc_channel_destroy(struct sxe_hw *hw); + +void sxe_hdc_available_set(s32 value); + +void sxe_time_sync_handler(struct work_struct *work); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_hw.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_hw.c new file mode 100644 index 000000000000..621458701093 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_hw.c @@ -0,0 +1,6652 @@ +#ifdef SXE_PHY_CONFIGURE +#include +#endif +#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST) +#include "sxe_pci.h" +#include "sxe_log.h" +#include "sxe_debug.h" +#include "sxe_host_hdc.h" +#include "sxe_sriov.h" +#include "sxe_compat.h" +#else +#include "sxe_errno.h" +#include "sxe_logs.h" +#include "sxe.h" + +#include "sxe_hw.h" +#endif + + +#define SXE_PFMSG_MASK (0xFF00) + +#define SXE_MSGID_MASK (0xFFFFFFFF) + +#define SXE_CTRL_MSG_MASK (0x700) + +#define SXE_RING_WAIT_LOOP 10 +#define SXE_REG_NAME_LEN 16 +#define SXE_DUMP_REG_STRING_LEN 73 +#define SXE_DUMP_REGS_NUM 64 +#define SXE_MAX_RX_DESC_POLL 10 +#define SXE_LPBK_EN 0x00000001 +#define 
SXE_MACADDR_LOW_4_BYTE 4 +#define SXE_MACADDR_HIGH_2_BYTE 2 +#define SXE_RSS_FIELD_MASK 0xffff0000 +#define SXE_MRQE_MASK 0x0000000f + +#define SXE_HDC_DATA_LEN_MAX 256 + +#define SXE_8_TC_MSB (0x11111111) + +STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg); +STATIC void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value); +static void sxe_write_reg64(struct sxe_hw *hw, u32 reg, u64 value); + +#define SXE_WRITE_REG_ARRAY_32(a, reg, offset, value) \ + sxe_write_reg(a, reg + (offset << 2), value) +#define SXE_READ_REG_ARRAY_32(a, reg, offset) \ + sxe_read_reg(a, reg + (offset << 2)) + +#define SXE_REG_READ(hw, addr) sxe_read_reg(hw, addr) +#define SXE_REG_WRITE(hw, reg, value) sxe_write_reg(hw, reg, value) +#define SXE_WRITE_FLUSH(a) sxe_read_reg(a, SXE_STATUS) +#define SXE_REG_WRITE_ARRAY(hw, reg, offset, value) \ + sxe_write_reg(hw, (reg) + ((offset) << 2), (value)) + +#define SXE_SWAP_32(_value) __swab32((_value)) + +#define SXE_REG_WRITE_BE32(a, reg, value) \ + SXE_REG_WRITE((a), (reg), SXE_SWAP_32(ntohl(value))) + +#define SXE_SWAP_16(_value) __swab16((_value)) + +#define SXE_REG64_WRITE(a, reg, value) sxe_write_reg64((a), (reg), (value)) + +enum sxe_ipsec_table { + SXE_IPSEC_IP_TABLE = 0, + SXE_IPSEC_SPI_TABLE, + SXE_IPSEC_KEY_TABLE, +}; + +u32 mac_regs[] = { + SXE_COMCTRL, + SXE_PCCTRL, + SXE_LPBKCTRL, + SXE_MAXFS, + SXE_VLANCTRL, + SXE_VLANID, + SXE_LINKS, + SXE_HLREG0, + SXE_MFLCN, + SXE_MACC, +}; + +u16 sxe_mac_reg_num_get(void) +{ + return ARRAY_SIZE(mac_regs); +} + + +#ifndef SXE_DPDK + +void sxe_hw_fault_handle(struct sxe_hw *hw) +{ + struct sxe_adapter *adapter = hw->adapter; + + if (test_bit(SXE_HW_FAULT, &hw->state)) { + goto l_ret; + } + + set_bit(SXE_HW_FAULT, &hw->state); + + LOG_DEV_ERR("sxe nic hw fault\n"); + + if ((hw->fault_handle != NULL) && (hw->priv != NULL) ) { + hw->fault_handle(hw->priv); + } + +l_ret: + return; +} + +static u32 sxe_hw_fault_check(struct sxe_hw *hw, u32 reg) +{ + u32 i, value; + u8 __iomem *base_addr = hw->reg_base_addr; + struct sxe_adapter *adapter = hw->adapter; + + if (sxe_is_hw_fault(hw)) { + goto l_out; + } + + for (i = 0; i < SXE_REG_READ_RETRY; i++) { + value = hw->reg_read(base_addr + SXE_STATUS); + if (value != SXE_REG_READ_FAIL) { + break; + } + + mdelay(3); + } + + if (SXE_REG_READ_FAIL == value) { + LOG_ERROR_BDF("read registers multiple times failed, ret=%#x\n", value); + sxe_hw_fault_handle(hw); + } else { + value = hw->reg_read(base_addr + reg); + } + + return value; +l_out: + return SXE_REG_READ_FAIL; +} + +STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg) +{ + u32 value; + u8 __iomem *base_addr = hw->reg_base_addr; + struct sxe_adapter *adapter = hw->adapter; + + if (sxe_is_hw_fault(hw)) { + value = SXE_REG_READ_FAIL; + goto l_ret; + } + + value = hw->reg_read(base_addr + reg); + if (unlikely(SXE_REG_READ_FAIL == value)) { + LOG_ERROR_BDF("reg[0x%x] read failed, ret=%#x\n", reg, value); + value = sxe_hw_fault_check(hw, reg); + } + +l_ret: + return value; +} + +STATIC void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *base_addr = hw->reg_base_addr; + + if (sxe_is_hw_fault(hw)) { + goto l_ret; + } + + hw->reg_write(value, base_addr + reg); + +l_ret: + return; +} + +#else + +STATIC u32 sxe_read_reg(struct sxe_hw *hw, u32 reg) +{ + u32 i, value; + u8 __iomem *base_addr = hw->reg_base_addr; + + value = rte_le_to_cpu_32(rte_read32(base_addr + reg)); + if (unlikely(SXE_REG_READ_FAIL == value)) { + + value = rte_le_to_cpu_32(rte_read32(base_addr + SXE_STATUS)); + if (unlikely(SXE_REG_READ_FAIL != value)) { 
+ + value = rte_le_to_cpu_32(rte_read32(base_addr + reg)); + } else { + LOG_ERROR("reg[0x%x] and reg[0x%x] read failed, ret=%#x\n", + reg, SXE_STATUS, value); + for (i = 0; i < SXE_REG_READ_RETRY; i++) { + + value = rte_le_to_cpu_32(rte_read32(base_addr + SXE_STATUS)); + if (unlikely(SXE_REG_READ_FAIL != value)) { + + value = rte_le_to_cpu_32(rte_read32(base_addr + reg)); + LOG_INFO("reg[0x%x] read ok, value=%#x\n", + reg, value); + break; + } else { + LOG_ERROR("reg[0x%x] and reg[0x%x] read failed, ret=%#x\n", + reg, SXE_STATUS, value); + } + + mdelay(3); + } + } + } + + return value; +} + +STATIC void sxe_write_reg(struct sxe_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *base_addr = hw->reg_base_addr; + + rte_write32((rte_cpu_to_le_32(value)), (base_addr + reg)); + + return; +} +#endif + +static void sxe_write_reg64(struct sxe_hw *hw, u32 reg, u64 value) +{ + u8 __iomem *reg_addr = hw->reg_base_addr; + + if (sxe_is_hw_fault(hw)) { + goto l_ret; + } + + writeq(value, reg_addr + reg); + +l_ret: + return; +} + + +void sxe_hw_no_snoop_disable(struct sxe_hw *hw) +{ + u32 ctrl_ext; + + ctrl_ext = SXE_REG_READ(hw, SXE_CTRL_EXT); + ctrl_ext |= SXE_CTRL_EXT_NS_DIS; + SXE_REG_WRITE(hw, SXE_CTRL_EXT, ctrl_ext); + SXE_WRITE_FLUSH(hw); + + return; +} + +s32 sxe_hw_uc_addr_pool_enable(struct sxe_hw *hw, + u8 rar_idx, u8 pool_idx) +{ + s32 ret = 0; + u32 value; + struct sxe_adapter *adapter = hw->adapter; + + if (rar_idx > SXE_UC_ENTRY_NUM_MAX) { + ret = -SXE_ERR_PARAM; + LOG_DEV_ERR("pool_idx:%d rar_idx:%d invalid.\n", + pool_idx, rar_idx); + goto l_end; + } + + if (pool_idx < 32) { + value = SXE_REG_READ(hw, SXE_MPSAR_LOW(rar_idx)); + value |= BIT(pool_idx); + SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), value); + } else { + value = SXE_REG_READ(hw, SXE_MPSAR_HIGH(rar_idx)); + value |= BIT(pool_idx - 32); + SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), value); + } + +l_end: + return ret; +} + +static s32 sxe_hw_uc_addr_pool_disable(struct sxe_hw *hw, u8 rar_idx) +{ + u32 hi; + u32 low; + struct sxe_adapter *adapter = hw->adapter; + + hi = SXE_REG_READ(hw, SXE_MPSAR_HIGH(rar_idx)); + low = SXE_REG_READ(hw, SXE_MPSAR_LOW(rar_idx)); + + if (sxe_is_hw_fault(hw)) { + goto l_end; + } + + if (!hi & !low) { + LOG_DEBUG_BDF("no need clear rar-pool relation register.\n"); + goto l_end; + } + + if (low) { + SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), 0); + } + if (hi) { + SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), 0); + } + + +l_end: + return 0; +} + +s32 sxe_hw_nic_reset(struct sxe_hw *hw) +{ + s32 ret = 0; + u32 ctrl, i; + struct sxe_adapter *adapter = hw->adapter; + + ctrl = SXE_CTRL_RST; + ctrl |= SXE_REG_READ(hw, SXE_CTRL); + ctrl &= ~SXE_CTRL_GIO_DIS; + SXE_REG_WRITE(hw, SXE_CTRL, ctrl); + + SXE_WRITE_FLUSH(hw); + usleep_range(1000, 1200); + + for (i = 0; i < 10; i++) { + ctrl = SXE_REG_READ(hw, SXE_CTRL); + if (!(ctrl & SXE_CTRL_RST_MASK)) { + break; + } + udelay(1); + } + + if (ctrl & SXE_CTRL_RST_MASK) { + ret = -SXE_ERR_RESET_FAILED; + LOG_DEV_ERR("reset polling failed to complete\n"); + } + + return ret; +} + +void sxe_hw_pf_rst_done_set(struct sxe_hw *hw) +{ + u32 value; + + value = SXE_REG_READ(hw, SXE_CTRL_EXT); + value |= SXE_CTRL_EXT_PFRSTD; + SXE_REG_WRITE(hw, SXE_CTRL_EXT, value); + + return; +} + +static void sxe_hw_regs_flush(struct sxe_hw *hw) +{ + SXE_WRITE_FLUSH(hw); + return; +} + +static const struct sxe_reg_info sxe_reg_info_tbl[] = { + + {SXE_CTRL, 1, 1, "CTRL"}, + {SXE_STATUS, 1, 1, "STATUS"}, + {SXE_CTRL_EXT, 1, 1, "CTRL_EXT"}, + + {SXE_EICR, 1, 1, "EICR"}, + + {SXE_SRRCTL(0), 16, 0x4, 
"SRRCTL"}, + {SXE_RDH(0), 64, 0x40, "RDH"}, + {SXE_RDT(0), 64, 0x40, "RDT"}, + {SXE_RXDCTL(0), 64, 0x40, "RXDCTL"}, + {SXE_RDBAL(0), 64, 0x40, "RDBAL"}, + {SXE_RDBAH(0), 64, 0x40, "RDBAH"}, + + {SXE_TDBAL(0), 32, 0x40, "TDBAL"}, + {SXE_TDBAH(0), 32, 0x40, "TDBAH"}, + {SXE_TDLEN(0), 32, 0x40, "TDLEN"}, + {SXE_TDH(0), 32, 0x40, "TDH"}, + {SXE_TDT(0), 32, 0x40, "TDT"}, + {SXE_TXDCTL(0), 32, 0x40, "TXDCTL"}, + + { .name = NULL } +}; + +static void sxe_hw_reg_print(struct sxe_hw *hw, + const struct sxe_reg_info *reginfo) +{ + u32 i, j; + s8 *value; + u32 first_reg_idx = 0; + u32 regs[SXE_DUMP_REGS_NUM]; + s8 reg_name[SXE_REG_NAME_LEN]; + s8 buf[SXE_DUMP_REG_STRING_LEN]; + struct sxe_adapter *adapter = hw->adapter; + + switch (reginfo->addr) { + case SXE_SRRCTL(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_SRRCTL(i)); + } + break; + case SXE_RDLEN(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_RDLEN(i)); + } + break; + case SXE_RDH(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_RDH(i)); + } + break; + case SXE_RDT(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_RDT(i)); + } + break; + case SXE_RXDCTL(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_RXDCTL(i)); + } + break; + case SXE_RDBAL(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_RDBAL(i)); + } + break; + case SXE_RDBAH(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_RDBAH(i)); + } + break; + case SXE_TDBAL(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_TDBAL(i)); + } + break; + case SXE_TDBAH(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_TDBAH(i)); + } + break; + case SXE_TDLEN(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_TDLEN(i)); + } + break; + case SXE_TDH(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_TDH(i)); + } + break; + case SXE_TDT(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_TDT(i)); + } + break; + case SXE_TXDCTL(0): + for (i = 0; i < SXE_DUMP_REGS_NUM; i++) { + regs[i] = SXE_REG_READ(hw, SXE_TXDCTL(i)); + } + break; + default: + LOG_DEV_INFO("%-15s %08x\n", + reginfo->name, SXE_REG_READ(hw, reginfo->addr)); + goto l_end; + } + + while (first_reg_idx < SXE_DUMP_REGS_NUM) { + value = buf; + snprintf(reg_name, SXE_REG_NAME_LEN, + "%s[%d-%d]", reginfo->name, + first_reg_idx, (first_reg_idx + 7)); + + for (j = 0; j < 8; j++) { + value += sprintf(value, " %08x", regs[first_reg_idx++]); + } + + LOG_DEV_ERR("%-15s%s\n", reg_name, buf); + } + +l_end: + return; +} + +static void sxe_hw_reg_dump(struct sxe_hw *hw) +{ + const struct sxe_reg_info *reginfo; + + for (reginfo = (const struct sxe_reg_info *)sxe_reg_info_tbl; + reginfo->name; reginfo++) { + sxe_hw_reg_print(hw, reginfo); + } + + return; +} + +static s32 sxe_hw_status_reg_test(struct sxe_hw *hw) +{ + s32 ret = 0; + u32 value, before, after; + u32 toggle = 0x7FFFF30F; + struct sxe_adapter *adapter = hw->adapter; + + before = SXE_REG_READ(hw, SXE_STATUS); + value = (SXE_REG_READ(hw, SXE_STATUS) & toggle); + SXE_REG_WRITE(hw, SXE_STATUS, toggle); + after = SXE_REG_READ(hw, SXE_STATUS) & toggle; + if (value != after) { + LOG_MSG_ERR(drv, "failed status register test got: " + "0x%08X expected: 0x%08X\n", + after, value); + ret = -SXE_DIAG_TEST_BLOCKED; + goto l_end; + } + + 
SXE_REG_WRITE(hw, SXE_STATUS, before); + +l_end: + return ret; +} + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +struct sxe_self_test_reg { + u32 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +static const struct sxe_self_test_reg self_test_reg[] = { + { SXE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFF0 }, + { SXE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFE0, 0x8007FFF0 }, + { SXE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { SXE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { SXE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + { SXE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { SXE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { SXE_RDLEN(0), 4, PATTERN_TEST, 0x000FFFFF, 0x000FFFFF }, + { SXE_RXDCTL(0), 4, WRITE_NO_TEST, 0, SXE_RXDCTL_ENABLE }, + { SXE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { SXE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, + { SXE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { SXE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { SXE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + { SXE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + { SXE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { SXE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, + { SXE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { .reg = 0 } +}; + +static s32 sxe_hw_reg_pattern_test(struct sxe_hw *hw, u32 reg, + u32 mask, u32 write) +{ + s32 ret = 0; + u32 pat, val, before; + struct sxe_adapter *adapter = hw->adapter; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFE}; + + if (sxe_is_hw_fault(hw)) { + LOG_ERROR_BDF("hw fault\n"); + ret = -SXE_DIAG_TEST_BLOCKED; + goto l_end; + } + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = SXE_REG_READ(hw, reg); + + SXE_REG_WRITE(hw, reg, test_pattern[pat] & write); + val = SXE_REG_READ(hw, reg); + if (val != (test_pattern[pat] & write & mask)) { + LOG_MSG_ERR(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, val, (test_pattern[pat] & write & mask)); + SXE_REG_WRITE(hw, reg, before); + ret = -SXE_DIAG_REG_PATTERN_TEST_ERR; + goto l_end; + } + + SXE_REG_WRITE(hw, reg, before); + } + +l_end: + return ret; +} + +static s32 sxe_hw_reg_set_and_check(struct sxe_hw *hw, int reg, + u32 mask, u32 write) +{ + s32 ret = 0; + u32 val, before; + struct sxe_adapter *adapter = hw->adapter; + + if (sxe_is_hw_fault(hw)) { + LOG_ERROR_BDF("hw fault\n"); + ret = -SXE_DIAG_TEST_BLOCKED; + goto l_end; + } + + before = SXE_REG_READ(hw, reg); + SXE_REG_WRITE(hw, reg, write & mask); + val = SXE_REG_READ(hw, reg); + if ((write & mask) != (val & mask)) { + LOG_MSG_ERR(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + SXE_REG_WRITE(hw, reg, before); + ret = -SXE_DIAG_CHECK_REG_TEST_ERR; + goto l_end; + } + + SXE_REG_WRITE(hw, reg, before); + +l_end: + return ret; +} + +STATIC s32 sxe_hw_regs_test(struct sxe_hw *hw) +{ + u32 i; + s32 ret = 0; + const struct sxe_self_test_reg *test = self_test_reg; + struct sxe_adapter *adapter = hw->adapter; + + ret = sxe_hw_status_reg_test(hw); + if (ret) { + LOG_MSG_ERR(drv, "status register test failed\n"); + goto l_end; + } + + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + ret = sxe_hw_reg_pattern_test(hw, + 
test->reg + (i * 0x40), + test->mask, test->write); + break; + case TABLE32_TEST: + ret = sxe_hw_reg_pattern_test(hw, + test->reg + (i * 4), + test->mask, test->write); + break; + case TABLE64_TEST_LO: + ret = sxe_hw_reg_pattern_test(hw, + test->reg + (i * 8), + test->mask, test->write); + break; + case TABLE64_TEST_HI: + ret = sxe_hw_reg_pattern_test(hw, + (test->reg + 4) + (i * 8), + test->mask, test->write); + break; + case SET_READ_TEST: + ret = sxe_hw_reg_set_and_check(hw, + test->reg + (i * 0x40), + test->mask, test->write); + break; + case WRITE_NO_TEST: + SXE_REG_WRITE(hw, test->reg + (i * 0x40), + test->write); + break; + default: + LOG_ERROR_BDF("reg test mod err, type=%d\n", + test->test_type); + break; + } + + if (ret) { + goto l_end; + } + + } + test++; + } + +l_end: + return ret; +} + +static const struct sxe_setup_operations sxe_setup_ops = { + .regs_dump = sxe_hw_reg_dump, + .reg_read = sxe_read_reg, + .reg_write = sxe_write_reg, + .regs_test = sxe_hw_regs_test, + .reset = sxe_hw_nic_reset, + .regs_flush = sxe_hw_regs_flush, + .pf_rst_done_set = sxe_hw_pf_rst_done_set, + .no_snoop_disable = sxe_hw_no_snoop_disable, +}; + + +static void sxe_hw_ring_irq_enable(struct sxe_hw *hw, u64 qmask) +{ + u32 mask0, mask1; + + mask0 = qmask & 0xFFFFFFFF; + mask1 = qmask >> 32; + + if (mask0 && mask1) { + SXE_REG_WRITE(hw, SXE_EIMS_EX(0), mask0); + SXE_REG_WRITE(hw, SXE_EIMS_EX(1), mask1); + } else if (mask0) { + SXE_REG_WRITE(hw, SXE_EIMS_EX(0), mask0); + } else if (mask1) { + SXE_REG_WRITE(hw, SXE_EIMS_EX(1), mask1); + } + + return; +} + +u32 sxe_hw_pending_irq_read_clear(struct sxe_hw *hw) +{ + return SXE_REG_READ(hw, SXE_EICR); +} + +void sxe_hw_pending_irq_write_clear(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_EICR, value); + return; +} + +u32 sxe_hw_irq_cause_get(struct sxe_hw *hw) +{ + return SXE_REG_READ(hw, SXE_EICS); +} + +static void sxe_hw_event_irq_trigger(struct sxe_hw *hw) +{ + SXE_REG_WRITE(hw, SXE_EICS, (SXE_EICS_TCP_TIMER | SXE_EICS_OTHER)); + + return; +} + +static void sxe_hw_ring_irq_trigger(struct sxe_hw *hw, u64 eics) +{ + u32 mask; + + mask = (eics & 0xFFFFFFFF); + SXE_REG_WRITE(hw, SXE_EICS_EX(0), mask); + mask = (eics >> 32); + SXE_REG_WRITE(hw, SXE_EICS_EX(1), mask); + return; +} + +void sxe_hw_ring_irq_auto_disable(struct sxe_hw *hw, + bool is_msix) +{ + if (true == is_msix) { + SXE_REG_WRITE(hw, SXE_EIAM_EX(0), 0xFFFFFFFF); + SXE_REG_WRITE(hw, SXE_EIAM_EX(1), 0xFFFFFFFF); + } else { + SXE_REG_WRITE(hw, SXE_EIAM, SXE_EICS_RTX_QUEUE); + } + + return; +} + +void sxe_hw_irq_general_reg_set(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_GPIE, value); + + return; +} + +u32 sxe_hw_irq_general_reg_get(struct sxe_hw *hw) +{ + return SXE_REG_READ(hw, SXE_GPIE); +} + +static void sxe_hw_set_eitrsel(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_EITRSEL, value); + + return; +} + +void sxe_hw_event_irq_map(struct sxe_hw *hw, u8 offset, u16 irq_idx) +{ + u8 allocation; + u32 ivar, position; + + allocation = irq_idx | SXE_IVAR_ALLOC_VALID; + + position = (offset & 1) * 8; + + ivar = SXE_REG_READ(hw, SXE_IVAR_MISC); + ivar &= ~(0xFF << position); + ivar |= (allocation << position); + + SXE_REG_WRITE(hw, SXE_IVAR_MISC, ivar); + + return; +} + +void sxe_hw_ring_irq_map(struct sxe_hw *hw, bool is_tx, + u16 reg_idx, u16 irq_idx) +{ + u8 allocation; + u32 ivar, position; + + allocation = irq_idx | SXE_IVAR_ALLOC_VALID; + + position = ((reg_idx & 1) * 16) + (8 * is_tx); + + ivar = SXE_REG_READ(hw, SXE_IVAR(reg_idx >> 1)); + ivar &= ~(0xFF << 
position); + ivar |= (allocation << position); + + SXE_REG_WRITE(hw, SXE_IVAR(reg_idx >> 1), ivar); + + return; +} + +void sxe_hw_ring_irq_interval_set(struct sxe_hw *hw, + u16 irq_idx, u32 interval) +{ + u32 eitr = interval & SXE_EITR_ITR_MASK; + + eitr |= SXE_EITR_CNT_WDIS; + + SXE_REG_WRITE(hw, SXE_EITR(irq_idx), eitr); + + return; +} + +static void sxe_hw_event_irq_interval_set(struct sxe_hw *hw, + u16 irq_idx, u32 value) +{ + SXE_REG_WRITE(hw, SXE_EITR(irq_idx), value); + + return; +} + +void sxe_hw_event_irq_auto_clear_set(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_EIAC, value); + + return; +} + +void sxe_hw_specific_irq_disable(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_EIMC, value); + + return; +} + +void sxe_hw_specific_irq_enable(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_EIMS, value); + + return; +} + +void sxe_hw_all_irq_disable(struct sxe_hw *hw) +{ + SXE_REG_WRITE(hw, SXE_EIMC, 0xFFFF0000); + + SXE_REG_WRITE(hw, SXE_EIMC_EX(0), ~0); + SXE_REG_WRITE(hw, SXE_EIMC_EX(1), ~0); + + SXE_WRITE_FLUSH(hw); + + return; +} + +static void sxe_hw_spp_configure(struct sxe_hw *hw, u32 hw_spp_proc_delay_us) +{ + SXE_REG_WRITE(hw, SXE_SPP_PROC, + (SXE_REG_READ(hw, SXE_SPP_PROC) & + ~SXE_SPP_PROC_DELAY_US_MASK) | + hw_spp_proc_delay_us); + + return; +} + +static s32 sxe_hw_irq_test(struct sxe_hw *hw, u32 *icr, bool shared) +{ + s32 ret = 0; + u32 i, mask; + struct sxe_adapter *adapter = hw->adapter; + + sxe_hw_specific_irq_disable(hw, 0xFFFFFFFF); + sxe_hw_regs_flush(hw); + usleep_range(10000, 20000); + + for (i = 0; i < 10; i++) { + mask = BIT(i); + if (!shared) { + LOG_INFO_BDF("test irq: irq test start\n"); + *icr = 0; + SXE_REG_WRITE(hw, SXE_EIMC, ~mask & 0x00007FFF); + SXE_REG_WRITE(hw, SXE_EICS, ~mask & 0x00007FFF); + sxe_hw_regs_flush(hw); + usleep_range(10000, 20000); + + if (*icr & mask) { + LOG_ERROR_BDF("test irq: failed, eicr = %x\n", *icr); + ret = -SXE_DIAG_DISABLE_IRQ_TEST_ERR; + break; + } + LOG_INFO_BDF("test irq: irq test end\n"); + } + + LOG_INFO_BDF("test irq: mask irq test start\n"); + *icr = 0; + SXE_REG_WRITE(hw, SXE_EIMS, mask); + SXE_REG_WRITE(hw, SXE_EICS, mask); + sxe_hw_regs_flush(hw); + usleep_range(10000, 20000); + + if (!(*icr & mask)) { + LOG_ERROR_BDF("test irq: mask failed, eicr = %x\n", *icr); + ret = -SXE_DIAG_ENABLE_IRQ_TEST_ERR; + break; + } + LOG_INFO_BDF("test irq: mask irq test end\n"); + + sxe_hw_specific_irq_disable(hw, mask); + sxe_hw_regs_flush(hw); + usleep_range(10000, 20000); + + if (!shared) { + LOG_INFO_BDF("test irq: other irq test start\n"); + *icr = 0; + SXE_REG_WRITE(hw, SXE_EIMC, ~mask & 0x00007FFF); + SXE_REG_WRITE(hw, SXE_EICS, ~mask & 0x00007FFF); + sxe_hw_regs_flush(hw); + usleep_range(10000, 20000); + + if (*icr) { + LOG_ERROR_BDF("test irq: other irq failed, eicr = %x\n", *icr); + ret = -SXE_DIAG_DISABLE_OTHER_IRQ_TEST_ERR; + break; + } + LOG_INFO_BDF("test irq: other irq test end\n"); + } + } + + sxe_hw_specific_irq_disable(hw, 0xFFFFFFFF); + sxe_hw_regs_flush(hw); + usleep_range(10000, 20000); + + return ret; +} + +static const struct sxe_irq_operations sxe_irq_ops = { + .event_irq_auto_clear_set = sxe_hw_event_irq_auto_clear_set, + .ring_irq_interval_set = sxe_hw_ring_irq_interval_set, + .event_irq_interval_set = sxe_hw_event_irq_interval_set, + .set_eitrsel = sxe_hw_set_eitrsel, + .ring_irq_map = sxe_hw_ring_irq_map, + .event_irq_map = sxe_hw_event_irq_map, + .irq_general_reg_set = sxe_hw_irq_general_reg_set, + .irq_general_reg_get = sxe_hw_irq_general_reg_get, + .ring_irq_auto_disable = 
sxe_hw_ring_irq_auto_disable, + .pending_irq_read_clear = sxe_hw_pending_irq_read_clear, + .pending_irq_write_clear = sxe_hw_pending_irq_write_clear, + .ring_irq_enable = sxe_hw_ring_irq_enable, + .irq_cause_get = sxe_hw_irq_cause_get, + .event_irq_trigger = sxe_hw_event_irq_trigger, + .ring_irq_trigger = sxe_hw_ring_irq_trigger, + .specific_irq_disable = sxe_hw_specific_irq_disable, + .specific_irq_enable = sxe_hw_specific_irq_enable, + .all_irq_disable = sxe_hw_all_irq_disable, + .spp_configure = sxe_hw_spp_configure, + .irq_test = sxe_hw_irq_test, +}; + + +u32 sxe_hw_link_speed_get(struct sxe_hw *hw) +{ + u32 speed, value; + struct sxe_adapter *adapter = hw->adapter; + value = SXE_REG_READ(hw, SXE_COMCTRL); + + if ((value & SXE_COMCTRL_SPEED_10G) == SXE_COMCTRL_SPEED_10G) { + speed = SXE_LINK_SPEED_10GB_FULL; + } else if ((value & SXE_COMCTRL_SPEED_1G) == SXE_COMCTRL_SPEED_1G) { + speed = SXE_LINK_SPEED_1GB_FULL; + } else { + speed = SXE_LINK_SPEED_UNKNOWN; + } + + LOG_DEBUG_BDF("hw link speed=%x, (0x80=10G, 0x20=1G)\n, reg=%x", + speed, value); + + return speed; +} + +void sxe_hw_link_speed_set(struct sxe_hw *hw, u32 speed) +{ + u32 ctrl; + + ctrl = SXE_REG_READ(hw, SXE_COMCTRL); + + if (SXE_LINK_SPEED_1GB_FULL == speed) { + ctrl |= SXE_COMCTRL_SPEED_1G; + } else if (SXE_LINK_SPEED_10GB_FULL == speed) { + ctrl |= SXE_COMCTRL_SPEED_10G; + } + + SXE_REG_WRITE(hw, SXE_COMCTRL, ctrl); + + return; +} + +STATIC bool sxe_hw_1g_link_up_check(struct sxe_hw *hw) +{ + return (SXE_REG_READ(hw, SXE_LINKS) & SXE_LINKS_UP) ? true : false; +} + +bool sxe_hw_is_link_state_up(struct sxe_hw *hw) +{ + bool ret = false; + u32 links_reg, link_speed; + struct sxe_adapter *adapter = hw->adapter; + + links_reg = SXE_REG_READ(hw, SXE_LINKS); + + LOG_DEBUG_BDF("nic link reg: 0x%x\n", links_reg); + + if (links_reg & SXE_LINKS_UP) { + ret = true; + + link_speed = sxe_hw_link_speed_get(hw); + if ((link_speed == SXE_LINK_SPEED_10GB_FULL) && + (links_reg & SXE_10G_LINKS_DOWN)) { + ret = false; + } + } + + return ret; +} + +void sxe_hw_mac_pad_enable(struct sxe_hw *hw) +{ + u32 ctl; + + ctl = SXE_REG_READ(hw, SXE_MACCFG); + ctl |= SXE_MACCFG_PAD_EN; + SXE_REG_WRITE(hw, SXE_MACCFG, ctl); + + return; +} + +s32 sxe_hw_fc_enable(struct sxe_hw *hw) +{ + s32 ret = 0; + u8 i; + u32 reg; + u32 flctrl_val; + u32 fcrtl, fcrth; + struct sxe_adapter *adapter = hw->adapter; + + flctrl_val = SXE_REG_READ(hw, SXE_FLCTRL); + flctrl_val &= ~(SXE_FCTRL_TFCE_MASK | SXE_FCTRL_RFCE_MASK | + SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK); + + switch (hw->fc.current_mode) { + case SXE_FC_NONE: + break; + case SXE_FC_RX_PAUSE: + flctrl_val |= SXE_FCTRL_RFCE_LFC_EN; + break; + case SXE_FC_TX_PAUSE: + flctrl_val |= SXE_FCTRL_TFCE_LFC_EN; + break; + case SXE_FC_FULL: + flctrl_val |= SXE_FCTRL_RFCE_LFC_EN; + flctrl_val |= SXE_FCTRL_TFCE_LFC_EN; + break; + default: + LOG_DEV_DEBUG("flow control param set incorrectly\n"); + ret = -SXE_ERR_CONFIG; + goto l_ret; + } + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 9) | SXE_FCRTL_XONE; + SXE_REG_WRITE(hw, SXE_FCRTL(i), fcrtl); + fcrth = (hw->fc.high_water[i] << 9) | SXE_FCRTH_FCEN; + } else { + SXE_REG_WRITE(hw, SXE_FCRTL(i), 0); + fcrth = (SXE_REG_READ(hw, SXE_RXPBSIZE(i)) - 24576) >> 1; + } + + SXE_REG_WRITE(hw, SXE_FCRTH(i), fcrth); + } + + flctrl_val |= SXE_FCTRL_TFCE_DPF_EN; + + if ((hw->fc.current_mode & SXE_FC_TX_PAUSE)) { + flctrl_val |= (SXE_FCTRL_TFCE_FCEN_MASK | 
SXE_FCTRL_TFCE_XONE_MASK); + } + + SXE_REG_WRITE(hw, SXE_FLCTRL, flctrl_val); + + reg = SXE_REG_READ(hw, SXE_PFCTOP); + reg &= ~SXE_PFCTOP_FCOP_MASK; + reg |= SXE_PFCTOP_FCT; + reg |= SXE_PFCTOP_FCOP_LFC; + SXE_REG_WRITE(hw, SXE_PFCTOP, reg); + + reg = hw->fc.pause_time * 0x00010001U; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) { + SXE_REG_WRITE(hw, SXE_FCTTV(i), reg); + } + + SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2); + +l_ret: + return ret; +} + +void sxe_fc_autoneg_localcap_set(struct sxe_hw *hw) +{ + u32 reg = 0; + + if (hw->fc.requested_mode == SXE_FC_DEFAULT) { + hw->fc.requested_mode = SXE_FC_FULL; + } + + reg = SXE_REG_READ(hw, SXE_PCS1GANA); + + switch (hw->fc.requested_mode) { + case SXE_FC_NONE: + reg &= ~(SXE_PCS1GANA_SYM_PAUSE | SXE_PCS1GANA_ASM_PAUSE); + break; + case SXE_FC_TX_PAUSE: + reg |= SXE_PCS1GANA_ASM_PAUSE; + reg &= ~SXE_PCS1GANA_SYM_PAUSE; + break; + case SXE_FC_RX_PAUSE: + case SXE_FC_FULL: + reg |= SXE_PCS1GANA_SYM_PAUSE | SXE_PCS1GANA_ASM_PAUSE; + break; + default: + LOG_ERROR("Flow control param set incorrectly."); + break; + } + + SXE_REG_WRITE(hw, SXE_PCS1GANA, reg); + return; +} + +s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx) +{ + s32 ret = 0; + u8 i; + u32 reg; + u32 flctrl_val; + u32 fcrtl, fcrth; + struct sxe_adapter *adapter = hw->adapter; + u8 rx_en_num; + + flctrl_val = SXE_REG_READ(hw, SXE_FLCTRL); + flctrl_val &= ~(SXE_FCTRL_TFCE_MASK | SXE_FCTRL_RFCE_MASK | + SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK); + + switch (hw->fc.current_mode) { + case SXE_FC_NONE: + rx_en_num = 0; + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + reg = SXE_REG_READ(hw, SXE_FCRTH(i)); + if (reg & SXE_FCRTH_FCEN) { + rx_en_num++; + } + } + if (rx_en_num > 1) { + flctrl_val |= SXE_FCTRL_TFCE_PFC_EN; + } + + break; + + case SXE_FC_RX_PAUSE: + flctrl_val |= SXE_FCTRL_RFCE_PFC_EN; + + rx_en_num = 0; + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + reg = SXE_REG_READ(hw, SXE_FCRTH(i)); + if (reg & SXE_FCRTH_FCEN) { + rx_en_num++; + } + } + + if (rx_en_num > 1) { + flctrl_val |= SXE_FCTRL_TFCE_PFC_EN; + } + + break; + case SXE_FC_TX_PAUSE: + flctrl_val |= SXE_FCTRL_TFCE_PFC_EN; + break; + case SXE_FC_FULL: + flctrl_val |= SXE_FCTRL_RFCE_PFC_EN; + flctrl_val |= SXE_FCTRL_TFCE_PFC_EN; + break; + default: + LOG_DEV_DEBUG("flow control param set incorrectly\n"); + ret = -SXE_ERR_CONFIG; + goto l_ret; + } + + if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) && + hw->fc.high_water[tc_idx]) { + fcrtl = (hw->fc.low_water[tc_idx] << 9) | SXE_FCRTL_XONE; + SXE_REG_WRITE(hw, SXE_FCRTL(tc_idx), fcrtl); + fcrth = (hw->fc.high_water[tc_idx] << 9) | SXE_FCRTH_FCEN; + } else { + SXE_REG_WRITE(hw, SXE_FCRTL(tc_idx), 0); + fcrth = (SXE_REG_READ(hw, SXE_RXPBSIZE(tc_idx)) - 24576) >> 1; + } + + SXE_REG_WRITE(hw, SXE_FCRTH(tc_idx), fcrth); + + flctrl_val |= SXE_FCTRL_TFCE_DPF_EN; + + if ((hw->fc.current_mode & SXE_FC_TX_PAUSE)) { + flctrl_val |= (BIT(tc_idx) << 16) & SXE_FCTRL_TFCE_FCEN_MASK; + flctrl_val |= (BIT(tc_idx) << 24) & SXE_FCTRL_TFCE_XONE_MASK; + } + + SXE_REG_WRITE(hw, SXE_FLCTRL, flctrl_val); + + reg = SXE_REG_READ(hw, SXE_PFCTOP); + reg &= ~SXE_PFCTOP_FCOP_MASK; + reg |= SXE_PFCTOP_FCT; + reg |= SXE_PFCTOP_FCOP_PFC; + SXE_REG_WRITE(hw, SXE_PFCTOP, reg); + + reg = hw->fc.pause_time * 0x00010001U; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) { + SXE_REG_WRITE(hw, SXE_FCTTV(i), reg); + } + + SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2); + +l_ret: + return ret; +} + +void sxe_hw_crc_configure(struct sxe_hw *hw) +{ + u32 ctrl = SXE_REG_READ(hw, SXE_PCCTRL); + + 
ctrl |= SXE_PCCTRL_TXCE | SXE_PCCTRL_RXCE | SXE_PCCTRL_PCSC_ALL; + SXE_REG_WRITE(hw, SXE_PCCTRL, ctrl); + + return; +} + +void sxe_hw_loopback_switch(struct sxe_hw *hw, bool is_enable) +{ + u32 value; + + value = (true == is_enable) ? SXE_LPBK_EN : 0; + + SXE_REG_WRITE(hw, SXE_LPBKCTRL, value); + + return; +} + +void sxe_hw_mac_txrx_enable(struct sxe_hw *hw) +{ + u32 ctl; + + ctl = SXE_REG_READ(hw, SXE_COMCTRL); + ctl |= SXE_COMCTRL_TXEN | SXE_COMCTRL_RXEN | SXE_COMCTRL_EDSEL; + SXE_REG_WRITE(hw, SXE_COMCTRL, ctl); + + return; +} + +void sxe_hw_mac_max_frame_set(struct sxe_hw *hw, u32 max_frame) +{ + u32 maxfs = SXE_REG_READ(hw, SXE_MAXFS); + + if (max_frame != (maxfs >> SXE_MAXFS_MFS_SHIFT)) { + maxfs &= ~SXE_MAXFS_MFS_MASK; + maxfs |= max_frame << SXE_MAXFS_MFS_SHIFT; + } + + maxfs |= SXE_MAXFS_RFSEL | SXE_MAXFS_TFSEL; + SXE_REG_WRITE(hw, SXE_MAXFS, maxfs); + + return; +} + +u32 sxe_hw_mac_max_frame_get(struct sxe_hw *hw) +{ + u32 maxfs = SXE_REG_READ(hw, SXE_MAXFS); + + maxfs &= SXE_MAXFS_MFS_MASK; + maxfs >>= SXE_MAXFS_MFS_SHIFT; + + return maxfs; +} + +bool sxe_device_supports_autoneg_fc(struct sxe_hw *hw) +{ + bool supported = true; + bool link_up = sxe_hw_is_link_state_up(hw); + u32 link_speed = sxe_hw_link_speed_get(hw); + + if (link_up) { + supported = (link_speed == SXE_LINK_SPEED_1GB_FULL) ? + true : false; + } + + return supported; +} + +STATIC void sxe_hw_fc_param_init(struct sxe_hw *hw) +{ + hw->fc.requested_mode = SXE_FC_FULL; + hw->fc.current_mode = SXE_FC_FULL; + hw->fc.pause_time = SXE_DEFAULT_FCPAUSE; + + hw->fc.disable_fc_autoneg = true; + return; +} + +void sxe_hw_fc_tc_high_water_mark_set(struct sxe_hw *hw, + u8 tc_idx, u32 mark) +{ + hw->fc.high_water[tc_idx] = mark; + + return; +} + +void sxe_hw_fc_tc_low_water_mark_set(struct sxe_hw *hw, + u8 tc_idx, u32 mark) +{ + hw->fc.low_water[tc_idx] = mark; + + return; +} + +bool sxe_hw_is_fc_autoneg_disabled(struct sxe_hw *hw) +{ + return hw->fc.disable_fc_autoneg; +} + +void sxe_hw_fc_autoneg_disable_set(struct sxe_hw *hw, + bool is_disabled) +{ + hw->fc.disable_fc_autoneg = is_disabled; + return; +} + +static enum sxe_fc_mode sxe_hw_fc_current_mode_get(struct sxe_hw *hw) +{ + return hw->fc.current_mode; +} + +static enum sxe_fc_mode sxe_hw_fc_requested_mode_get(struct sxe_hw *hw) +{ + return hw->fc.requested_mode; +} + +void sxe_hw_fc_requested_mode_set(struct sxe_hw *hw, + enum sxe_fc_mode mode) +{ + hw->fc.requested_mode = mode; + return; +} + +static const struct sxe_mac_operations sxe_mac_ops = { + .link_up_1g_check = sxe_hw_1g_link_up_check, + .link_state_is_up = sxe_hw_is_link_state_up, + .link_speed_get = sxe_hw_link_speed_get, + .link_speed_set = sxe_hw_link_speed_set, + .pad_enable = sxe_hw_mac_pad_enable, + .crc_configure = sxe_hw_crc_configure, + .loopback_switch = sxe_hw_loopback_switch, + .txrx_enable = sxe_hw_mac_txrx_enable, + .max_frame_set = sxe_hw_mac_max_frame_set, + .max_frame_get = sxe_hw_mac_max_frame_get, + .fc_enable = sxe_hw_fc_enable, + .fc_autoneg_localcap_set = sxe_fc_autoneg_localcap_set, + .fc_tc_high_water_mark_set = sxe_hw_fc_tc_high_water_mark_set, + .fc_tc_low_water_mark_set = sxe_hw_fc_tc_low_water_mark_set, + .fc_param_init = sxe_hw_fc_param_init, + .fc_current_mode_get = sxe_hw_fc_current_mode_get, + .fc_requested_mode_get = sxe_hw_fc_requested_mode_get, + .fc_requested_mode_set = sxe_hw_fc_requested_mode_set, + .is_fc_autoneg_disabled = sxe_hw_is_fc_autoneg_disabled, + .fc_autoneg_disable_set = sxe_hw_fc_autoneg_disable_set, +}; + +u32 sxe_hw_rx_mode_get(struct sxe_hw *hw) +{ + return 
SXE_REG_READ(hw, SXE_FCTRL); +} + +u32 sxe_hw_pool_rx_mode_get(struct sxe_hw *hw, u16 pool_idx) +{ + return SXE_REG_READ(hw, SXE_VMOLR(pool_idx)); +} + +void sxe_hw_rx_mode_set(struct sxe_hw *hw, u32 filter_ctrl) +{ + SXE_REG_WRITE(hw, SXE_FCTRL, filter_ctrl); + return; +} + +void sxe_hw_pool_rx_mode_set(struct sxe_hw *hw, + u32 vmolr, u16 pool_idx) +{ + SXE_REG_WRITE(hw, SXE_VMOLR(pool_idx), vmolr); + return; +} + +void sxe_hw_rx_lro_enable(struct sxe_hw *hw, bool is_enable) +{ + u32 rfctl = SXE_REG_READ(hw, SXE_RFCTL); + rfctl &= ~SXE_RFCTL_LRO_DIS; + + if (!is_enable) { + rfctl |= SXE_RFCTL_LRO_DIS; + } + + SXE_REG_WRITE(hw, SXE_RFCTL, rfctl); + return; +} + +void sxe_hw_rx_nfs_filter_disable(struct sxe_hw *hw) +{ + u32 rfctl = 0; + + rfctl |= (SXE_RFCTL_NFSW_DIS | SXE_RFCTL_NFSR_DIS); + SXE_REG_WRITE(hw, SXE_RFCTL, rfctl); + return; +} + +void sxe_hw_rx_udp_frag_checksum_disable(struct sxe_hw *hw) +{ + u32 rxcsum; + + rxcsum = SXE_REG_READ(hw, SXE_RXCSUM); + rxcsum |= SXE_RXCSUM_PCSD; + SXE_REG_WRITE(hw, SXE_RXCSUM, rxcsum); + return; +} + +void sxe_hw_fc_mac_addr_set(struct sxe_hw *hw, u8 *mac_addr) +{ + u32 mac_addr_h, mac_addr_l; + + mac_addr_l = ((u32)mac_addr[5] | + ((u32)mac_addr[4] << 8) | + ((u32)mac_addr[3] << 16) | + ((u32)mac_addr[2] << 24)); + mac_addr_h = (((u32)mac_addr[1] << 16) | + ((u32)mac_addr[0] << 24)); + + SXE_REG_WRITE(hw, SXE_SACONH, mac_addr_h); + SXE_REG_WRITE(hw, SXE_SACONL, mac_addr_l); + + return; +} + +s32 sxe_hw_uc_addr_add(struct sxe_hw *hw, u32 rar_idx, + u8 *addr, u32 pool_idx) +{ + s32 ret = 0; + u32 rar_low, rar_high; + struct sxe_adapter *adapter = hw->adapter; + + if (rar_idx >= SXE_UC_ENTRY_NUM_MAX) { + LOG_DEV_DEBUG("RAR rar_idx %d is out of range:%u.\n", + rar_idx, SXE_UC_ENTRY_NUM_MAX); + ret = -SXE_ERR_PARAM; + goto l_end; + } + + sxe_hw_uc_addr_pool_enable(hw, rar_idx, pool_idx); + + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + + rar_high = SXE_REG_READ(hw, SXE_RAH(rar_idx)); + rar_high &= ~(0x0000FFFF | SXE_RAH_AV); + rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); + + rar_high |= SXE_RAH_AV; + + SXE_REG_WRITE(hw, SXE_RAL(rar_idx), rar_low); + SXE_WRITE_FLUSH(hw); + SXE_REG_WRITE(hw, SXE_RAH(rar_idx), rar_high); + + LOG_DEBUG_BDF("rar_idx:%d pool_idx:%u addr:%pM add to rar done\n", + rar_idx, pool_idx, addr); + +l_end: + return ret; +} + +s32 sxe_hw_uc_addr_del(struct sxe_hw *hw, u32 index) +{ + s32 ret = 0; + u32 rar_high; + struct sxe_adapter *adapter = hw->adapter; + + if (index >= SXE_UC_ENTRY_NUM_MAX) { + ret = -SXE_ERR_PARAM; + LOG_ERROR_BDF("uc_entry_num:%d index:%u invalid.(err:%d)\n", + SXE_UC_ENTRY_NUM_MAX, index, ret); + goto l_end; + } + + rar_high = SXE_REG_READ(hw, SXE_RAH(index)); + rar_high &= ~(0x0000FFFF | SXE_RAH_AV); + + SXE_REG_WRITE(hw, SXE_RAH(index), rar_high); + SXE_WRITE_FLUSH(hw); + SXE_REG_WRITE(hw, SXE_RAL(index), 0); + + sxe_hw_uc_addr_pool_disable(hw, index); + +l_end: + return ret; +} + +void sxe_hw_mta_hash_table_set(struct sxe_hw *hw, + u8 index, u32 value) +{ + SXE_REG_WRITE(hw, SXE_MTA(index), value); + return; +} + +void sxe_hw_mta_hash_table_update(struct sxe_hw *hw, + u8 reg_idx, u8 bit_idx) +{ + u32 value = SXE_REG_READ(hw, SXE_MTA(reg_idx)); + + value |= BIT(bit_idx); + + LOG_INFO("mta update value:0x%x.\n", value); + SXE_REG_WRITE(hw, SXE_MTA(reg_idx), value); + + return; +} + +void sxe_hw_mc_filter_enable(struct sxe_hw *hw) +{ + u32 value = SXE_MC_FILTER_TYPE0 | SXE_MCSTCTRL_MFE; + + SXE_REG_WRITE(hw, SXE_MCSTCTRL, value); + + return; +} + 
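Illustrative sketch (not part of the patch): one plausible way a caller could derive the (reg_idx, bit_idx) pair consumed by sxe_hw_mta_hash_table_update() above and then enable the multicast filter; the 12-bit hash extraction shown is an assumption modeled on the common "filter type 0" convention, and example_mta_program() is a hypothetical helper, not an SXE API.

/*
 * Sketch only: maps a multicast MAC address to an MTA table slot.
 * Assumes a 12-bit hash (128 registers x 32 bits = 4096 slots) taken
 * from the upper address bits, matching the usual filter-type-0 layout;
 * the exact hash used by SXE_MC_FILTER_TYPE0 hardware is not shown in
 * this patch and is assumed here.
 */
static void example_mta_program(struct sxe_hw *hw, const u8 *mc_addr)
{
	u16 vector;
	u8 reg_idx;
	u8 bit_idx;

	/* assumed 12-bit hash from the top 12 bits of the address */
	vector = ((u16)mc_addr[4] >> 4) | ((u16)mc_addr[5] << 4);
	vector &= 0xFFF;

	reg_idx = (vector >> 5) & 0x7F;	/* which 32-bit MTA register */
	bit_idx = vector & 0x1F;	/* which bit inside that register */

	sxe_hw_mta_hash_table_update(hw, reg_idx, bit_idx);
	sxe_hw_mc_filter_enable(hw);
}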
+static void sxe_hw_mc_filter_disable(struct sxe_hw *hw) +{ + u32 value = SXE_REG_READ(hw, SXE_MCSTCTRL); + + value &= ~SXE_MCSTCTRL_MFE; + + SXE_REG_WRITE(hw, SXE_MCSTCTRL, value); + + return; +} + +void sxe_hw_uc_addr_clear(struct sxe_hw *hw) +{ + u32 i; + struct sxe_adapter *adapter = hw->adapter; + + sxe_hw_uc_addr_pool_disable(hw, 0); + + LOG_DEV_DEBUG("clear uc filter addr register:0-%d\n", + SXE_UC_ENTRY_NUM_MAX - 1); + for (i = 0; i < SXE_UC_ENTRY_NUM_MAX; i++) { + SXE_REG_WRITE(hw, SXE_RAL(i), 0); + SXE_REG_WRITE(hw, SXE_RAH(i), 0); + } + + LOG_DEV_DEBUG("clear %u uta filter addr register\n", + SXE_UTA_ENTRY_NUM_MAX); + for (i = 0; i < SXE_UTA_ENTRY_NUM_MAX; i++) { + SXE_REG_WRITE(hw, SXE_UTA(i), 0); + } + + SXE_REG_WRITE(hw, SXE_MCSTCTRL, SXE_MC_FILTER_TYPE0); + + LOG_DEV_DEBUG("clear %u mta filter addr register\n", + SXE_MTA_ENTRY_NUM_MAX); + for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++) { + SXE_REG_WRITE(hw, SXE_MTA(i), 0); + } + + return; +} + +static void sxe_hw_ethertype_filter_set(struct sxe_hw *hw, + u8 filter_type, u32 value) +{ + SXE_REG_WRITE(hw, SXE_ETQF(filter_type), value); + return; +} + +void sxe_hw_vt_ctrl_cfg(struct sxe_hw *hw, u8 default_pool) +{ + u32 ctrl; + + ctrl = SXE_REG_READ(hw, SXE_VT_CTL); + + ctrl |= SXE_VT_CTL_VT_ENABLE; + ctrl &= ~SXE_VT_CTL_POOL_MASK; + ctrl |= default_pool << SXE_VT_CTL_POOL_SHIFT; + ctrl |= SXE_VT_CTL_REPLEN; + + SXE_REG_WRITE(hw, SXE_VT_CTL, ctrl); + + return; +} + +void sxe_hw_vt_disable(struct sxe_hw *hw) +{ + u32 vmdctl; + + vmdctl = SXE_REG_READ(hw, SXE_VT_CTL); + vmdctl &= ~SXE_VMD_CTL_POOL_EN; + SXE_REG_WRITE(hw, SXE_VT_CTL, vmdctl); + + return; +} + +#ifdef SXE_WOL_CONFIGURE + +static void sxe_hw_wol_status_set(struct sxe_hw *hw) +{ + SXE_REG_WRITE(hw, SXE_WUS, ~0); + + return; +} + +static void sxe_hw_wol_mode_set(struct sxe_hw *hw, u32 wol_status) +{ + u32 fctrl; + + SXE_REG_WRITE(hw, SXE_WUC, SXE_WUC_PME_EN); + + fctrl = SXE_REG_READ(hw, SXE_FCTRL); + fctrl |= SXE_FCTRL_BAM; + if (wol_status & SXE_WUFC_MC) { + fctrl |= SXE_FCTRL_MPE; + } + + SXE_REG_WRITE(hw, SXE_FCTRL, fctrl); + + SXE_REG_WRITE(hw, SXE_WUFC, wol_status); + sxe_hw_wol_status_set(hw); + + return; +} + +static void sxe_hw_wol_mode_clean(struct sxe_hw *hw) +{ + SXE_REG_WRITE(hw, SXE_WUC, 0); + SXE_REG_WRITE(hw, SXE_WUFC, 0); + + return; +} +#endif + +static const struct sxe_filter_mac_operations sxe_filter_mac_ops = { + .rx_mode_get = sxe_hw_rx_mode_get, + .rx_mode_set = sxe_hw_rx_mode_set, + .pool_rx_mode_get = sxe_hw_pool_rx_mode_get, + .pool_rx_mode_set = sxe_hw_pool_rx_mode_set, + .rx_lro_enable = sxe_hw_rx_lro_enable, + .uc_addr_add = sxe_hw_uc_addr_add, + .uc_addr_del = sxe_hw_uc_addr_del, + .uc_addr_clear = sxe_hw_uc_addr_clear, + .fc_mac_addr_set = sxe_hw_fc_mac_addr_set, + .mta_hash_table_set = sxe_hw_mta_hash_table_set, + .mta_hash_table_update = sxe_hw_mta_hash_table_update, + + .mc_filter_enable = sxe_hw_mc_filter_enable, + .mc_filter_disable = sxe_hw_mc_filter_disable, + .rx_nfs_filter_disable = sxe_hw_rx_nfs_filter_disable, + .ethertype_filter_set = sxe_hw_ethertype_filter_set, + .vt_ctrl_configure = sxe_hw_vt_ctrl_cfg, + .uc_addr_pool_enable = sxe_hw_uc_addr_pool_enable, + .rx_udp_frag_checksum_disable = sxe_hw_rx_udp_frag_checksum_disable, + +#ifdef SXE_WOL_CONFIGURE + .wol_mode_set = sxe_hw_wol_mode_set, + .wol_mode_clean = sxe_hw_wol_mode_clean, + .wol_status_set = sxe_hw_wol_status_set, +#endif + + .vt_disable = sxe_hw_vt_disable, +}; + +u32 sxe_hw_vlan_pool_filter_read(struct sxe_hw *hw, u16 reg_index) +{ + return SXE_REG_READ(hw, 
SXE_VLVF(reg_index)); +} + +static void sxe_hw_vlan_pool_filter_write(struct sxe_hw *hw, + u16 reg_index, u32 value) +{ + SXE_REG_WRITE(hw, SXE_VLVF(reg_index), value); + return; +} + +static u32 sxe_hw_vlan_pool_filter_bitmap_read(struct sxe_hw *hw, + u16 reg_index) +{ + return SXE_REG_READ(hw, SXE_VLVFB(reg_index)); +} + +static void sxe_hw_vlan_pool_filter_bitmap_write(struct sxe_hw *hw, + u16 reg_index, u32 value) +{ + SXE_REG_WRITE(hw, SXE_VLVFB(reg_index), value); + return; +} + +void sxe_hw_vlan_filter_array_write(struct sxe_hw *hw, + u16 reg_index, u32 value) +{ + SXE_REG_WRITE(hw, SXE_VFTA(reg_index), value); + return; +} + +u32 sxe_hw_vlan_filter_array_read(struct sxe_hw *hw, u16 reg_index) +{ + return SXE_REG_READ(hw, SXE_VFTA(reg_index)); +} + +void sxe_hw_vlan_filter_switch(struct sxe_hw *hw, bool is_enable) +{ + u32 vlnctrl; + + vlnctrl = SXE_REG_READ(hw, SXE_VLNCTRL); + if (is_enable) { + vlnctrl |= SXE_VLNCTRL_VFE; + } else { + vlnctrl &= ~SXE_VLNCTRL_VFE; + } + + SXE_REG_WRITE(hw, SXE_VLNCTRL, vlnctrl); + return; +} + +static void sxe_hw_vlan_untagged_pkts_rcv_switch(struct sxe_hw *hw, + u32 vf, bool accept) +{ + u32 vmolr = SXE_REG_READ(hw, SXE_VMOLR(vf)); + vmolr |= SXE_VMOLR_BAM; + if (accept) { + vmolr |= SXE_VMOLR_AUPE; + } else { + vmolr &= ~SXE_VMOLR_AUPE; + } + + LOG_WARN("vf:%u value:0x%x.\n", vf, vmolr); + SXE_REG_WRITE(hw, SXE_VMOLR(vf), vmolr); + return; +} + +s32 sxe_hw_vlvf_slot_find(struct sxe_hw *hw, u32 vlan, bool vlvf_bypass) +{ + s32 ret, regindex, first_empty_slot; + u32 bits; + struct sxe_adapter *adapter = hw->adapter; + + if (vlan == 0) { + ret = 0; + goto l_end; + } + + first_empty_slot = vlvf_bypass ? -SXE_ERR_NO_SPACE : 0; + + vlan |= SXE_VLVF_VIEN; + + for (regindex = SXE_VLVF_ENTRIES; --regindex;) { + bits = SXE_REG_READ(hw, SXE_VLVF(regindex)); + if (bits == vlan) { + ret = regindex; + goto l_end; + } + + if (!first_empty_slot && !bits) { + first_empty_slot = regindex; + } + } + + if (!first_empty_slot) { + LOG_DEV_WARN("no space in VLVF.\n"); + } + + ret = first_empty_slot ? : -SXE_ERR_NO_SPACE; +l_end: + return ret; +} + +s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw, + u32 vid, u32 pool, + bool vlan_on, bool vlvf_bypass) +{ + s32 ret = 0; + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; + + LOG_DEBUG("vid: %u, pool: %u, vlan_on: %d, vlvf_bypass: %d", + vid, pool, vlan_on, vlvf_bypass); + + if ((vid > 4095) || (pool > 63)) { + ret = -SXE_ERR_PARAM; + goto l_end; + } + + + regidx = vid / 32; + vfta_delta = BIT(vid % 32); + vfta = SXE_REG_READ(hw, SXE_VFTA(regidx)); + + vfta_delta &= vlan_on ? 
~vfta : vfta; + vfta ^= vfta_delta; + + if (!(SXE_REG_READ(hw, SXE_VT_CTL) & SXE_VT_CTL_VT_ENABLE)) { + goto vfta_update; + } + + vlvf_index = sxe_hw_vlvf_slot_find(hw, vid, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) { + goto vfta_update; + } + + ret = vlvf_index; + goto l_end; + } + + bits = SXE_REG_READ(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32)); + + bits |= BIT(pool % 32); + if (vlan_on) { + goto vlvf_update; + } + + bits ^= BIT(pool % 32); + + if (!bits && + !SXE_REG_READ(hw, SXE_VLVFB(vlvf_index * 2 + 1 - pool / 32))) { + if (vfta_delta) { + SXE_REG_WRITE(hw, SXE_VFTA(regidx), vfta); + } + + SXE_REG_WRITE(hw, SXE_VLVF(vlvf_index), 0); + SXE_REG_WRITE(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32), 0); + + goto l_end; + } + + vfta_delta = 0; + +vlvf_update: + SXE_REG_WRITE(hw, SXE_VLVFB(vlvf_index * 2 + pool / 32), bits); + SXE_REG_WRITE(hw, SXE_VLVF(vlvf_index), SXE_VLVF_VIEN | vid); + +vfta_update: + if (vfta_delta) { + SXE_REG_WRITE(hw, SXE_VFTA(regidx), vfta); + } + +l_end: + return ret; +} + +void sxe_hw_vlan_filter_array_clear(struct sxe_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < SXE_VFT_TBL_SIZE; offset++) { + SXE_REG_WRITE(hw, SXE_VFTA(offset), 0); + } + + for (offset = 0; offset < SXE_VLVF_ENTRIES; offset++) { + SXE_REG_WRITE(hw, SXE_VLVF(offset), 0); + SXE_REG_WRITE(hw, SXE_VLVFB(offset * 2), 0); + SXE_REG_WRITE(hw, SXE_VLVFB(offset * 2 + 1), 0); + } + + return; +} + +static const struct sxe_filter_vlan_operations sxe_filter_vlan_ops = { + .pool_filter_read = sxe_hw_vlan_pool_filter_read, + .pool_filter_write = sxe_hw_vlan_pool_filter_write, + .pool_filter_bitmap_read = sxe_hw_vlan_pool_filter_bitmap_read, + .pool_filter_bitmap_write = sxe_hw_vlan_pool_filter_bitmap_write, + .filter_array_write = sxe_hw_vlan_filter_array_write, + .filter_array_read = sxe_hw_vlan_filter_array_read, + .filter_array_clear = sxe_hw_vlan_filter_array_clear, + .filter_switch = sxe_hw_vlan_filter_switch, + .untagged_pkts_rcv_switch = sxe_hw_vlan_untagged_pkts_rcv_switch, + .filter_configure = sxe_hw_vlan_filter_configure, +}; + + +static void sxe_hw_rx_pkt_buf_switch(struct sxe_hw *hw, bool is_on) +{ + u32 dbucfg = SXE_REG_READ(hw, SXE_DRXCFG); + + if (is_on) { + dbucfg |= SXE_DRXCFG_DBURX_START; + } else { + dbucfg &= ~SXE_DRXCFG_DBURX_START; + } + + SXE_REG_WRITE(hw, SXE_DRXCFG, dbucfg); + + return; +} + +static void sxe_hw_rx_pkt_buf_size_configure(struct sxe_hw *hw, + u8 num_pb, + u32 headroom, + u16 strategy) +{ + u16 total_buf_size = (SXE_RX_PKT_BUF_SIZE - headroom); + u32 rx_buf_size; + u16 i = 0; + + if (!num_pb) { + num_pb = 1; + } + + switch (strategy) { + case (PBA_STRATEGY_WEIGHTED): + rx_buf_size = ((total_buf_size * 5 * 2) / (num_pb * 8)); + total_buf_size -= rx_buf_size * (num_pb / 2); + rx_buf_size <<= SXE_RX_PKT_BUF_SIZE_SHIFT; + for (i = 0; i < (num_pb / 2); i++) { + SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), rx_buf_size); + } + fallthrough; + case (PBA_STRATEGY_EQUAL): + rx_buf_size = (total_buf_size / (num_pb - i)) + << SXE_RX_PKT_BUF_SIZE_SHIFT; + for (; i < num_pb; i++) { + SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), rx_buf_size); + } + break; + + default: + break; + } + + for (; i < SXE_PKG_BUF_NUM_MAX; i++) { + SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), 0); + } + + return; +} + +u32 sxe_hw_rx_pkt_buf_size_get(struct sxe_hw *hw, u8 pb) +{ + return SXE_REG_READ(hw, SXE_RXPBSIZE(pb)); +} + +void sxe_hw_rx_multi_ring_configure(struct sxe_hw *hw, + u8 tcs, bool is_4q_per_pool, + bool sriov_enable) +{ + u32 mrqc = SXE_REG_READ(hw, SXE_MRQC); + + mrqc &= ~SXE_MRQE_MASK; + + if 
(sriov_enable) { + if (tcs > 4) { + mrqc |= SXE_MRQC_VMDQRT8TCEN; + } else if (tcs > 1) { + mrqc |= SXE_MRQC_VMDQRT4TCEN; + } else if (is_4q_per_pool == true) { + mrqc |= SXE_MRQC_VMDQRSS32EN; + } else { + mrqc |= SXE_MRQC_VMDQRSS64EN; + } + } else { + if (tcs > 4) { + mrqc |= SXE_MRQC_RTRSS8TCEN; + } else if (tcs > 1) { + mrqc |= SXE_MRQC_RTRSS4TCEN; + } else { + mrqc |= SXE_MRQC_RSSEN; + } + } + + SXE_REG_WRITE(hw, SXE_MRQC, mrqc); + + return; +} + +static void sxe_hw_rss_hash_pkt_type_set(struct sxe_hw *hw, u32 version) +{ + u32 mrqc = 0; + u32 rss_field = 0; + + rss_field |= SXE_MRQC_RSS_FIELD_IPV4 | + SXE_MRQC_RSS_FIELD_IPV4_TCP | + SXE_MRQC_RSS_FIELD_IPV6 | + SXE_MRQC_RSS_FIELD_IPV6_TCP; + + if (version == SXE_RSS_IP_VER_4) { + rss_field |= SXE_MRQC_RSS_FIELD_IPV4_UDP; + } + if (version == SXE_RSS_IP_VER_6) { + rss_field |= SXE_MRQC_RSS_FIELD_IPV6_UDP; + } + + mrqc |= rss_field; + SXE_REG_WRITE(hw, SXE_MRQC, mrqc); + + return; +} + +static void sxe_hw_rss_hash_pkt_type_update(struct sxe_hw *hw, + u32 version) +{ + u32 mrqc; + + mrqc = SXE_REG_READ(hw, SXE_MRQC); + + mrqc |= SXE_MRQC_RSS_FIELD_IPV4 + | SXE_MRQC_RSS_FIELD_IPV4_TCP + | SXE_MRQC_RSS_FIELD_IPV6 + | SXE_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(SXE_MRQC_RSS_FIELD_IPV4_UDP | + SXE_MRQC_RSS_FIELD_IPV6_UDP); + + if (version == SXE_RSS_IP_VER_4) { + mrqc |= SXE_MRQC_RSS_FIELD_IPV4_UDP; + } + if (version == SXE_RSS_IP_VER_6) { + mrqc |= SXE_MRQC_RSS_FIELD_IPV6_UDP; + } + + SXE_REG_WRITE(hw, SXE_MRQC, mrqc); + + return; +} + +static void sxe_hw_rss_rings_used_set(struct sxe_hw *hw, u32 rss_num, + u16 pool, u16 pf_offset) +{ + u32 psrtype = 0; + + if (rss_num > 3) { + psrtype |= 2u << 29; + } else if (rss_num > 1) { + psrtype |= 1u << 29; + } + + while (pool--) { + SXE_REG_WRITE(hw, SXE_PSRTYPE(pf_offset + pool), psrtype); + } + + return; +} + +void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key) +{ + u32 i; + + for (i = 0; i < SXE_MAX_RSS_KEY_ENTRIES; i++) { + SXE_REG_WRITE(hw, SXE_RSSRK(i), rss_key[i]); + } + + return; +} + +void sxe_hw_rss_redir_tbl_reg_write(struct sxe_hw *hw, + u16 reg_idx, u32 value) +{ + SXE_REG_WRITE(hw, SXE_RETA(reg_idx >> 2), value); + return; +} + +void sxe_hw_rss_redir_tbl_set_all(struct sxe_hw *hw, u8 *redir_tbl) +{ + u32 i; + u32 tbl = 0; + u32 indices_multi = 0x1; + + + for (i = 0; i < SXE_MAX_RETA_ENTRIES; i++) { + tbl |= indices_multi * redir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + sxe_hw_rss_redir_tbl_reg_write(hw, i, tbl); + tbl = 0; + } + } + return; +} + +void sxe_hw_rx_cap_switch_on(struct sxe_hw *hw) +{ + u32 rxctrl; + + if (hw->mac.set_lben) { + u32 pfdtxgswc = SXE_REG_READ(hw, SXE_PFDTXGSWC); + pfdtxgswc |= SXE_PFDTXGSWC_VT_LBEN; + SXE_REG_WRITE(hw, SXE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = false; + } + + rxctrl = SXE_REG_READ(hw, SXE_RXCTRL); + rxctrl |= SXE_RXCTRL_RXEN; + SXE_REG_WRITE(hw, SXE_RXCTRL, rxctrl); + + return; +} + +void sxe_hw_rx_cap_switch_off(struct sxe_hw *hw) +{ + u32 rxctrl; + + rxctrl = SXE_REG_READ(hw, SXE_RXCTRL); + if (rxctrl & SXE_RXCTRL_RXEN) { + u32 pfdtxgswc = SXE_REG_READ(hw, SXE_PFDTXGSWC); + if (pfdtxgswc & SXE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~SXE_PFDTXGSWC_VT_LBEN; + SXE_REG_WRITE(hw, SXE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + rxctrl &= ~SXE_RXCTRL_RXEN; + SXE_REG_WRITE(hw, SXE_RXCTRL, rxctrl); + } + + return; +} + +static void sxe_hw_rx_func_switch_on(struct sxe_hw *hw) +{ + u32 rxctrl; + + rxctrl = SXE_REG_READ(hw, SXE_COMCTRL); + rxctrl |= SXE_COMCTRL_RXEN | 
SXE_COMCTRL_EDSEL; + SXE_REG_WRITE(hw, SXE_COMCTRL, rxctrl); + + return; +} + +void sxe_hw_tx_pkt_buf_switch(struct sxe_hw *hw, bool is_on) +{ + u32 dbucfg; + + dbucfg = SXE_REG_READ(hw, SXE_DTXCFG); + + if (is_on) { + dbucfg |= SXE_DTXCFG_DBUTX_START; + dbucfg |= SXE_DTXCFG_DBUTX_BUF_ALFUL_CFG; + SXE_REG_WRITE(hw, SXE_DTXCFG, dbucfg); + } else { + dbucfg &= ~SXE_DTXCFG_DBUTX_START; + SXE_REG_WRITE(hw, SXE_DTXCFG, dbucfg); + } + + return; +} + +void sxe_hw_tx_pkt_buf_size_configure(struct sxe_hw *hw, u8 num_pb) +{ + u32 i, tx_pkt_size; + + if (!num_pb){ + num_pb = 1; + } + + tx_pkt_size = SXE_TX_PBSIZE_MAX / num_pb; + for (i = 0; i < num_pb; i++) { + SXE_REG_WRITE(hw, SXE_TXPBSIZE(i), tx_pkt_size); + } + + for (; i < SXE_PKG_BUF_NUM_MAX; i++) { + SXE_REG_WRITE(hw, SXE_TXPBSIZE(i), 0); + } + + return; +} + +void sxe_hw_rx_lro_ack_switch(struct sxe_hw *hw, bool is_on) +{ + u32 lro_dbu = SXE_REG_READ(hw, SXE_LRODBU); + + if (is_on) { + lro_dbu &= ~SXE_LRODBU_LROACKDIS; + } else { + lro_dbu |= SXE_LRODBU_LROACKDIS; + } + + SXE_REG_WRITE(hw, SXE_LRODBU, lro_dbu); + + return; +} + +static void sxe_hw_vf_rx_switch(struct sxe_hw *hw, + u32 reg_offset, u32 vf_index, bool is_off) +{ + u32 vfre = SXE_REG_READ(hw, SXE_VFRE(reg_offset)); + if (is_off) { + vfre &= ~BIT(vf_index); + } else { + vfre |= BIT(vf_index); + } + + SXE_REG_WRITE(hw, SXE_VFRE(reg_offset), vfre); + + return; +} + +STATIC s32 sxe_hw_fnav_wait_init_done(struct sxe_hw *hw) +{ + u32 i; + s32 ret = 0; + struct sxe_adapter *adapter = hw->adapter; + for (i = 0; i < SXE_FNAV_INIT_DONE_POLL; i++) { + if (SXE_REG_READ(hw, SXE_FNAVCTRL) & + SXE_FNAVCTRL_INIT_DONE) { + break; + } + + usleep_range(1000, 2000); + } + + if (i >= SXE_FNAV_INIT_DONE_POLL) { + LOG_DEV_DEBUG("flow navigator poll time exceeded!\n"); + ret = -SXE_ERR_FNAV_REINIT_FAILED; + } + + return ret; +} + +void sxe_hw_fnav_enable(struct sxe_hw *hw, u32 fnavctrl) +{ + u32 fnavctrl_ori; + bool is_clear_stat = false; + + SXE_REG_WRITE(hw, SXE_FNAVHKEY, SXE_FNAV_BUCKET_HASH_KEY); + SXE_REG_WRITE(hw, SXE_FNAVSKEY, SXE_FNAV_SAMPLE_HASH_KEY); + + fnavctrl_ori = SXE_REG_READ(hw, SXE_FNAVCTRL); + if((fnavctrl_ori & 0x13) != (fnavctrl & 0x13)) { + is_clear_stat = true; + } + + SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl); + SXE_WRITE_FLUSH(hw); + + sxe_hw_fnav_wait_init_done(hw); + + if(is_clear_stat) { + SXE_REG_READ(hw, SXE_FNAVUSTAT); + SXE_REG_READ(hw, SXE_FNAVFSTAT); + SXE_REG_READ(hw, SXE_FNAVMATCH); + SXE_REG_READ(hw, SXE_FNAVMISS); + SXE_REG_READ(hw, SXE_FNAVLEN); + } + + return; +} + +static s32 sxe_hw_fnav_mode_init(struct sxe_hw *hw, + u32 fnavctrl, u32 sxe_fnav_mode) +{ + struct sxe_adapter *adapter = hw->adapter; + + LOG_DEBUG_BDF("fnavctrl=0x%x, sxe_fnav_mode=%u\n", fnavctrl, sxe_fnav_mode); + + if ((sxe_fnav_mode != SXE_FNAV_SAMPLE_MODE) && + (sxe_fnav_mode != SXE_FNAV_SPECIFIC_MODE)) { + LOG_ERROR_BDF("mode[%u] a error fnav mode, fnav do not work. 
please use" + "SXE_FNAV_SAMPLE_MODE or SXE_FNAV_SPECIFIC_MODE\n", + sxe_fnav_mode); + goto l_end; + } + + if (sxe_fnav_mode == SXE_FNAV_SPECIFIC_MODE) { + fnavctrl |= SXE_FNAVCTRL_SPECIFIC_MATCH | + (SXE_FNAV_DROP_QUEUE << SXE_FNAVCTRL_DROP_Q_SHIFT); + } + + fnavctrl |= (0x6 << SXE_FNAVCTRL_FLEX_SHIFT) | + (0xA << SXE_FNAVCTRL_MAX_LENGTH_SHIFT) | + (4 << SXE_FNAVCTRL_FULL_THRESH_SHIFT); + + sxe_hw_fnav_enable(hw, fnavctrl); + +l_end: + return 0; +} + +u32 sxe_hw_fnav_port_mask_get(__be16 src_port_mask, __be16 dst_port_mask) +{ + u32 mask = ntohs(dst_port_mask); + + mask <<= SXE_FNAVTCPM_DPORTM_SHIFT; + mask |= ntohs(src_port_mask); + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +static s32 sxe_hw_fnav_vm_pool_mask_get(struct sxe_hw *hw, + u8 vm_pool, u32 *fnavm) +{ + s32 ret = 0; + struct sxe_adapter *adapter = hw->adapter; + + switch (vm_pool & SXE_SAMPLE_VM_POOL_MASK) { + case 0x0: + *fnavm |= SXE_FNAVM_POOL; + fallthrough; + case 0x7F: + break; + default: + LOG_DEV_ERR("error on vm pool mask\n"); + ret = -SXE_ERR_CONFIG; + } + + return ret; +} + +static s32 sxe_hw_fnav_flow_type_mask_get(struct sxe_hw *hw, + union sxe_fnav_rule_info *input_mask, + u32 *fnavm) +{ + s32 ret = 0; + struct sxe_adapter *adapter = hw->adapter; + + switch (input_mask->ntuple.flow_type & SXE_SAMPLE_L4TYPE_MASK) { + case 0x0: + *fnavm |= SXE_FNAVM_L4P; + if (input_mask->ntuple.dst_port || + input_mask->ntuple.src_port) { + LOG_DEV_ERR("error on src/dst port mask\n"); + ret = -SXE_ERR_CONFIG; + goto l_ret; + } + break; + case SXE_SAMPLE_L4TYPE_MASK: + break; + default: + LOG_DEV_ERR("error on flow type mask\n"); + ret = -SXE_ERR_CONFIG; + } + +l_ret: + return ret; +} + +static s32 sxe_hw_fnav_vlan_mask_get(struct sxe_hw *hw, + __be16 vlan_id, u32 *fnavm) +{ + s32 ret = 0; + struct sxe_adapter *adapter = hw->adapter; + + switch (ntohs(vlan_id) & SXE_SAMPLE_VLAN_MASK) { + case 0x0000: + *fnavm |= SXE_FNAVM_VLANID; + fallthrough; + case 0x0FFF: + *fnavm |= SXE_FNAVM_VLANP; + break; + case 0xE000: + *fnavm |= SXE_FNAVM_VLANID; + fallthrough; + case 0xEFFF: + break; + default: + LOG_DEV_ERR("error on VLAN mask\n"); + ret = -SXE_ERR_CONFIG; + } + + return ret; +} + +static s32 sxe_hw_fnav_flex_bytes_mask_get(struct sxe_hw *hw, + __be16 flex_bytes, u32 *fnavm) +{ + s32 ret = 0; + struct sxe_adapter *adapter = hw->adapter; + + switch ((__force u16)flex_bytes & SXE_SAMPLE_FLEX_BYTES_MASK) { + case 0x0000: + *fnavm |= SXE_FNAVM_FLEX; + fallthrough; + case 0xFFFF: + break; + default: + LOG_DEV_ERR("error on flexible byte mask\n"); + ret = -SXE_ERR_CONFIG; + } + + return ret; +} + +s32 sxe_hw_fnav_specific_rule_mask_set(struct sxe_hw *hw, + union sxe_fnav_rule_info *input_mask) +{ + s32 ret; + u32 fnavm = SXE_FNAVM_DIPv6; + u32 fnavtcpm; + struct sxe_adapter *adapter = hw->adapter; + + + if (input_mask->ntuple.bkt_hash) { + LOG_DEV_ERR("bucket hash should always be 0 in mask\n"); + } + + ret = sxe_hw_fnav_vm_pool_mask_get(hw, input_mask->ntuple.vm_pool, &fnavm); + if (ret) { + goto l_err_config; + } + + ret = sxe_hw_fnav_flow_type_mask_get(hw, input_mask, &fnavm); + if (ret) { + goto l_err_config; + } + + ret = sxe_hw_fnav_vlan_mask_get(hw, input_mask->ntuple.vlan_id, &fnavm); + if (ret) { + goto l_err_config; + } + + ret = sxe_hw_fnav_flex_bytes_mask_get(hw, input_mask->ntuple.flex_bytes, &fnavm); + if 
(ret) { + goto l_err_config; + } + + LOG_DEBUG_BDF("fnavm = 0x%x\n", fnavm); + SXE_REG_WRITE(hw, SXE_FNAVM, fnavm); + + fnavtcpm = sxe_hw_fnav_port_mask_get(input_mask->ntuple.src_port, + input_mask->ntuple.dst_port); + + LOG_DEBUG_BDF("fnavtcpm = 0x%x\n", fnavtcpm); + SXE_REG_WRITE(hw, SXE_FNAVTCPM, ~fnavtcpm); + SXE_REG_WRITE(hw, SXE_FNAVUDPM, ~fnavtcpm); + + SXE_REG_WRITE_BE32(hw, SXE_FNAVSIP4M, + ~input_mask->ntuple.src_ip[0]); + SXE_REG_WRITE_BE32(hw, SXE_FNAVDIP4M, + ~input_mask->ntuple.dst_ip[0]); + + return 0; + +l_err_config: + return -SXE_ERR_CONFIG; +} + +STATIC s32 sxe_hw_fnav_cmd_complete_check(struct sxe_hw *hw, + u32 *fnavcmd) +{ + u32 i; + + for (i = 0; i < SXE_FNAVCMD_CMD_POLL * 10; i++) { + *fnavcmd = SXE_REG_READ(hw, SXE_FNAVCMD); + if (!(*fnavcmd & SXE_FNAVCMD_CMD_MASK)) { + return 0; + } + + udelay(10); + } + + return -SXE_ERR_FNAV_CMD_INCOMPLETE; +} + +static void sxe_hw_fnav_filter_ip_set(struct sxe_hw *hw, + union sxe_fnav_rule_info *input) +{ + SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(0), + input->ntuple.src_ip[0]); + SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(1), + input->ntuple.src_ip[1]); + SXE_REG_WRITE_BE32(hw, SXE_FNAVSIPv6(2), + input->ntuple.src_ip[2]); + + SXE_REG_WRITE_BE32(hw, SXE_FNAVIPSA, input->ntuple.src_ip[0]); + + SXE_REG_WRITE_BE32(hw, SXE_FNAVIPDA, input->ntuple.dst_ip[0]); + + return; +} + +static void sxe_hw_fnav_filter_port_set(struct sxe_hw *hw, + union sxe_fnav_rule_info *input) +{ + u32 fnavport; + + fnavport = be16_to_cpu(input->ntuple.dst_port); + fnavport <<= SXE_FNAVPORT_DESTINATION_SHIFT; + fnavport |= be16_to_cpu(input->ntuple.src_port); + SXE_REG_WRITE(hw, SXE_FNAVPORT, fnavport); + + return; +} + +static void sxe_hw_fnav_filter_vlan_set(struct sxe_hw *hw, + union sxe_fnav_rule_info *input) +{ + u32 fnavvlan; + + fnavvlan = ntohs(SXE_SWAP_16(input->ntuple.flex_bytes)); + fnavvlan <<= SXE_FNAVVLAN_FLEX_SHIFT; + fnavvlan |= ntohs(input->ntuple.vlan_id); + SXE_REG_WRITE(hw, SXE_FNAVVLAN, fnavvlan); + + return; +} + +static void sxe_hw_fnav_filter_bkt_hash_set(struct sxe_hw *hw, + union sxe_fnav_rule_info *input, + u16 soft_id) +{ + u32 fnavhash; + + fnavhash = (__force u32)input->ntuple.bkt_hash; + fnavhash |= soft_id << SXE_FNAVHASH_SIG_SW_INDEX_SHIFT; + SXE_REG_WRITE(hw, SXE_FNAVHASH, fnavhash); + + return; +} + +static s32 sxe_hw_fnav_filter_cmd_set(struct sxe_hw *hw, + union sxe_fnav_rule_info *input, + u8 queue) +{ + u32 fnavcmd; + s32 ret; + struct sxe_adapter *adapter = hw->adapter; + + fnavcmd = SXE_FNAVCMD_CMD_ADD_FLOW | SXE_FNAVCMD_FILTER_UPDATE | + SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN; + +#ifndef SXE_DPDK + if (queue == SXE_FNAV_DROP_QUEUE) { + fnavcmd |= SXE_FNAVCMD_DROP; + } +#endif + + fnavcmd |= input->ntuple.flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT; + fnavcmd |= (u32)queue << SXE_FNAVCMD_RX_QUEUE_SHIFT; + fnavcmd |= (u32)input->ntuple.vm_pool << SXE_FNAVCMD_VT_POOL_SHIFT; + + SXE_REG_WRITE(hw, SXE_FNAVCMD, fnavcmd); + ret = sxe_hw_fnav_cmd_complete_check(hw, &fnavcmd); + if (ret) { + LOG_DEV_ERR("flow navigator command did not complete!\n"); + } + + return ret; +} + +s32 sxe_hw_fnav_specific_rule_add(struct sxe_hw *hw, + union sxe_fnav_rule_info *input, + u16 soft_id, u8 queue) +{ + s32 ret; + struct sxe_adapter *adapter = hw->adapter; + + sxe_hw_fnav_filter_ip_set(hw, input); + + sxe_hw_fnav_filter_port_set(hw, input); + + sxe_hw_fnav_filter_vlan_set(hw, input); + + sxe_hw_fnav_filter_bkt_hash_set(hw, input, soft_id); + + SXE_WRITE_FLUSH(hw); + + ret = sxe_hw_fnav_filter_cmd_set(hw, input, queue); + if (ret) { + 
LOG_ERROR_BDF("set fnav filter cmd error. ret=%d\n", ret); + } + + return ret; +} + +s32 sxe_hw_fnav_specific_rule_del(struct sxe_hw *hw, + union sxe_fnav_rule_info *input, + u16 soft_id) +{ + u32 fnavhash; + u32 fnavcmd; + s32 ret; + struct sxe_adapter *adapter = hw->adapter; + + + fnavhash = (__force u32)input->ntuple.bkt_hash; + fnavhash |= soft_id << SXE_FNAVHASH_SIG_SW_INDEX_SHIFT; + SXE_REG_WRITE(hw, SXE_FNAVHASH, fnavhash); + + SXE_WRITE_FLUSH(hw); + + SXE_REG_WRITE(hw, SXE_FNAVCMD, SXE_FNAVCMD_CMD_QUERY_REM_FILT); + + ret = sxe_hw_fnav_cmd_complete_check(hw, &fnavcmd); + if (ret) { + LOG_DEV_ERR("flow navigator command did not complete!\n"); + return ret; + } + + if (fnavcmd & SXE_FNAVCMD_FILTER_VALID) { + SXE_REG_WRITE(hw, SXE_FNAVHASH, fnavhash); + SXE_WRITE_FLUSH(hw); + SXE_REG_WRITE(hw, SXE_FNAVCMD, + SXE_FNAVCMD_CMD_REMOVE_FLOW); + } + + return 0; +} + +void sxe_hw_fnav_sample_rule_configure(struct sxe_hw *hw, + u8 flow_type, u32 hash_value, u8 queue) +{ + u32 fnavcmd; + u64 fnavhashcmd; + struct sxe_adapter *adapter = hw->adapter; + + fnavcmd = SXE_FNAVCMD_CMD_ADD_FLOW | SXE_FNAVCMD_FILTER_UPDATE | + SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN; + fnavcmd |= (u32)flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT; + fnavcmd |= (u32)queue << SXE_FNAVCMD_RX_QUEUE_SHIFT; + + fnavhashcmd = (u64)fnavcmd << 32; + fnavhashcmd |= hash_value; + SXE_REG64_WRITE(hw, SXE_FNAVHASH, fnavhashcmd); + + LOG_DEV_DEBUG("tx queue=%x hash=%x\n", queue, (u32)fnavhashcmd); + + return; +} + +static u64 sxe_hw_fnav_sample_rule_hash_get(struct sxe_hw *hw, + u8 flow_type, u32 hash_value, u8 queue) +{ + u32 fnavcmd; + u64 fnavhashcmd; + struct sxe_adapter *adapter = hw->adapter; + + fnavcmd = SXE_FNAVCMD_CMD_ADD_FLOW | SXE_FNAVCMD_FILTER_UPDATE | + SXE_FNAVCMD_LAST | SXE_FNAVCMD_QUEUE_EN; + fnavcmd |= (u32)flow_type << SXE_FNAVCMD_FLOW_TYPE_SHIFT; + fnavcmd |= (u32)queue << SXE_FNAVCMD_RX_QUEUE_SHIFT; + + fnavhashcmd = (u64)fnavcmd << 32; + fnavhashcmd |= hash_value; + + LOG_DEV_DEBUG("tx queue=%x hash=%x\n", queue, (u32)fnavhashcmd); + + return fnavhashcmd; +} + +static s32 sxe_hw_fnav_sample_hash_cmd_get(struct sxe_hw *hw, + u8 flow_type, + u32 hash_value, + u8 queue, u64 *hash_cmd) +{ + s32 ret = 0; + u8 pkg_type; + struct sxe_adapter *adapter = hw->adapter; + + pkg_type = flow_type & SXE_SAMPLE_FLOW_TYPE_MASK; + switch (pkg_type) { + case SXE_SAMPLE_FLOW_TYPE_TCPV4: + case SXE_SAMPLE_FLOW_TYPE_UDPV4: + case SXE_SAMPLE_FLOW_TYPE_SCTPV4: + case SXE_SAMPLE_FLOW_TYPE_TCPV6: + case SXE_SAMPLE_FLOW_TYPE_UDPV6: + case SXE_SAMPLE_FLOW_TYPE_SCTPV6: + break; + default: + LOG_DEV_ERR("error on flow type input\n"); + ret = -SXE_ERR_CONFIG; + goto l_end; + } + + *hash_cmd = sxe_hw_fnav_sample_rule_hash_get(hw, pkg_type, hash_value, queue); + +l_end: + return ret; +} + +static s32 sxe_hw_fnav_single_sample_rule_del(struct sxe_hw *hw, + u32 hash) +{ + u32 fdircmd; + s32 ret; + struct sxe_adapter *adapter = hw->adapter; + + SXE_REG_WRITE(hw, SXE_FNAVHASH, hash); + SXE_WRITE_FLUSH(hw); + + SXE_REG_WRITE(hw, SXE_FNAVCMD, SXE_FNAVCMD_CMD_REMOVE_FLOW); + ret = sxe_hw_fnav_cmd_complete_check(hw, &fdircmd); + if (ret) { + LOG_DEV_ERR("flow navigator previous command did not complete," + "aborting table re-initialization.\n"); + } + + return ret; +} + +s32 sxe_hw_fnav_sample_rules_table_reinit(struct sxe_hw *hw) +{ + u32 fnavctrl = SXE_REG_READ(hw, SXE_FNAVCTRL); + u32 fnavcmd; + s32 ret; + struct sxe_adapter *adapter = hw->adapter; + + fnavctrl &= ~SXE_FNAVCTRL_INIT_DONE; + + ret = sxe_hw_fnav_cmd_complete_check(hw, &fnavcmd); + if 
(ret) { + LOG_DEV_ERR("flow navigator previous command did not complete," + "aborting table re-initialization.\n"); + goto l_ret; + } + + SXE_REG_WRITE(hw, SXE_FNAVFREE, 0); + SXE_WRITE_FLUSH(hw); + + SXE_REG_WRITE(hw, SXE_FNAVCMD, + (SXE_REG_READ(hw, SXE_FNAVCMD) | + SXE_FNAVCMD_CLEARHT)); + SXE_WRITE_FLUSH(hw); + SXE_REG_WRITE(hw, SXE_FNAVCMD, + (SXE_REG_READ(hw, SXE_FNAVCMD) & + ~SXE_FNAVCMD_CLEARHT)); + SXE_WRITE_FLUSH(hw); + + SXE_REG_WRITE(hw, SXE_FNAVHASH, 0x00); + SXE_WRITE_FLUSH(hw); + + SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl); + SXE_WRITE_FLUSH(hw); + + ret = sxe_hw_fnav_wait_init_done(hw); + if (ret) { + LOG_ERROR_BDF("flow navigator simple poll time exceeded!\n"); + goto l_ret; + } + + SXE_REG_READ(hw, SXE_FNAVUSTAT); + SXE_REG_READ(hw, SXE_FNAVFSTAT); + SXE_REG_READ(hw, SXE_FNAVMATCH); + SXE_REG_READ(hw, SXE_FNAVMISS); + SXE_REG_READ(hw, SXE_FNAVLEN); + +l_ret: + return ret; +} + +static void sxe_hw_fnav_sample_stats_reinit(struct sxe_hw *hw) +{ + SXE_REG_READ(hw, SXE_FNAVUSTAT); + SXE_REG_READ(hw, SXE_FNAVFSTAT); + SXE_REG_READ(hw, SXE_FNAVMATCH); + SXE_REG_READ(hw, SXE_FNAVMISS); + SXE_REG_READ(hw, SXE_FNAVLEN); + + return; +} + +static void sxe_hw_ptp_freq_adjust(struct sxe_hw *hw, u32 adj_freq) +{ + SXE_REG_WRITE(hw, SXE_TIMADJL, 0); + SXE_REG_WRITE(hw, SXE_TIMADJH, adj_freq); + SXE_WRITE_FLUSH(hw); + + return; +} + +u64 sxe_hw_ptp_systime_get(struct sxe_hw *hw) +{ + struct sxe_adapter *adapter = hw->adapter; + u32 systiml; + u32 systimm; + u64 ns; + + systiml = SXE_REG_READ(hw, SXE_SYSTIML); + systimm = SXE_REG_READ(hw, SXE_SYSTIMM); + ns = SXE_TIME_TO_NS(systiml, systimm); + + LOG_DEBUG_BDF("get ptp hw systime systiml=%u, systimm=%u, ns=%"SXE_PRIU64"\n", + systiml, systimm, ns); + return ns; +} + +void sxe_hw_ptp_systime_init(struct sxe_hw *hw) +{ + SXE_REG_WRITE(hw, SXE_SYSTIML, 0); + SXE_REG_WRITE(hw, SXE_SYSTIMM, 0); + SXE_REG_WRITE(hw, SXE_SYSTIMH, 0); + + SXE_WRITE_FLUSH(hw); + return; +} + +void sxe_hw_ptp_init(struct sxe_hw *hw) +{ + u32 regval; + u32 tsctl = SXE_TSCTRL_TSEN | + SXE_TSCTRL_VER_2 | + SXE_TSCTRL_PTYP_ALL | + SXE_TSCTRL_L4_UNICAST; + + regval = SXE_REG_READ(hw, SXE_TSCTRL); + regval &= ~SXE_TSCTRL_ONESTEP; + regval &= ~SXE_TSCTRL_CSEN; + regval |= tsctl; + SXE_REG_WRITE(hw, SXE_TSCTRL, regval); + + SXE_REG_WRITE(hw, SXE_TIMINC, + SXE_TIMINC_SET(SXE_INCPD, SXE_IV_NS, SXE_IV_SNS)); + + return; +} + +void sxe_hw_ptp_rx_timestamp_clear(struct sxe_hw *hw) +{ + SXE_REG_READ(hw, SXE_RXSTMPH); + return; +} + +void sxe_hw_ptp_tx_timestamp_get(struct sxe_hw *hw, + u32 *ts_sec, u32 *ts_ns) +{ + u32 reg_sec; + u32 reg_ns; + u32 sec_8bit; + u32 sec_24bit; + u32 systimm; + u32 systimm_8bit; + u32 systimm_24bit; + + SXE_REG64_WRITE(hw, SXE_TXSTMP_SEL, SXE_TXTS_MAGIC0); + reg_ns = SXE_REG_READ(hw, SXE_TXSTMP_VAL); + SXE_REG64_WRITE(hw, SXE_TXSTMP_SEL, SXE_TXTS_MAGIC1); + reg_sec = SXE_REG_READ(hw, SXE_TXSTMP_VAL); + systimm = SXE_REG_READ(hw, SXE_SYSTIMM); + + + sec_8bit = reg_sec & 0x000000FF; + sec_24bit = (reg_sec >> 8) & 0x00FFFFFF; + + systimm_24bit = systimm & 0x00FFFFFF; + systimm_8bit = systimm & 0xFF000000; + + *ts_ns = (sec_8bit << 24) | ((reg_ns & 0xFFFFFF00) >> 8); + + if (unlikely((sec_24bit - systimm_24bit) >= 0x00FFFFF0)) { + if (systimm_8bit >= 1) { + systimm_8bit -= 1; + } + } + + *ts_sec = systimm_8bit | sec_24bit; + return; +} + +u64 sxe_hw_ptp_rx_timestamp_get(struct sxe_hw *hw) +{ + struct sxe_adapter *adapter = hw->adapter; + u32 rxtsl; + u32 rxtsh; + u64 ns; + + rxtsl = SXE_REG_READ(hw, SXE_RXSTMPL); + rxtsh = SXE_REG_READ(hw, SXE_RXSTMPH); 
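+	/* fold the RXSTMPL/RXSTMPH register pair into a nanosecond value */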
+ ns = SXE_TIME_TO_NS(rxtsl, rxtsh); + + LOG_DEBUG_BDF("ptp get rx ptp timestamp low=%u, high=%u, ns=%"SXE_PRIU64"\n", + rxtsl, rxtsh, ns); + return ns; +} + +bool sxe_hw_ptp_is_rx_timestamp_valid(struct sxe_hw *hw) +{ + bool rx_tmstamp_valid = false; + u32 tsyncrxctl; + + tsyncrxctl = SXE_REG_READ(hw, SXE_TSYNCRXCTL); + if (tsyncrxctl & SXE_TSYNCRXCTL_RXTT) { + rx_tmstamp_valid = true; + } + + return rx_tmstamp_valid; +} + +void sxe_hw_ptp_timestamp_mode_set(struct sxe_hw *hw, + bool is_l2, u32 tsctl, u32 tses) +{ + u32 regval; + + if (is_l2) { + SXE_REG_WRITE(hw, SXE_ETQF(SXE_ETQF_FILTER_1588), + (SXE_ETQF_FILTER_EN | + SXE_ETQF_1588 | + ETH_P_1588)); + } else { + SXE_REG_WRITE(hw, SXE_ETQF(SXE_ETQF_FILTER_1588), 0); + } + + if (tsctl) { + regval = SXE_REG_READ(hw, SXE_TSCTRL); + regval |= tsctl; + SXE_REG_WRITE(hw, SXE_TSCTRL, regval); + } + + SXE_REG_WRITE(hw, SXE_TSES, tses); + + SXE_WRITE_FLUSH(hw); + + return; +} + +void sxe_hw_ptp_timestamp_enable(struct sxe_hw *hw) +{ + SXE_REG_WRITE(hw, SXE_TSYNCTXCTL, + (SXE_REG_READ(hw, SXE_TSYNCTXCTL) | + SXE_TSYNCTXCTL_TEN)); + + SXE_REG_WRITE(hw, SXE_TSYNCRXCTL, + (SXE_REG_READ(hw, SXE_TSYNCRXCTL) | + SXE_TSYNCRXCTL_REN)); + SXE_WRITE_FLUSH(hw); + + return; +} + +static void sxe_hw_dcb_tc_rss_configure(struct sxe_hw *hw, u16 rss) +{ + u32 msb = 0; + + while (rss) { + msb++; + rss >>= 1; + } + + SXE_REG_WRITE(hw, SXE_RQTC, msb * SXE_8_TC_MSB); +} + +static void sxe_hw_tx_ring_disable(struct sxe_hw *hw, u8 reg_idx, + unsigned long timeout) +{ + unsigned long wait_delay, delay_interval; + int wait_loop; + u32 txdctl; + struct sxe_adapter *adapter = hw->adapter; + + txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx)); + txdctl &= ~SXE_TXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl); + + delay_interval = timeout / 100; + + wait_loop = SXE_MAX_RX_DESC_POLL; + wait_delay = delay_interval; + + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx)); + + if (!(txdctl & SXE_TXDCTL_ENABLE)) { + return; + } + } + + LOG_MSG_ERR(drv, "register TXDCTL.ENABLE not cleared within the polling period\n"); +} + +static void sxe_hw_rx_ring_disable(struct sxe_hw *hw, u8 reg_idx, + unsigned long timeout) +{ + unsigned long wait_delay, delay_interval; + int wait_loop; + u32 rxdctl; + struct sxe_adapter *adapter = hw->adapter; + + rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx)); + rxdctl &= ~SXE_RXDCTL_ENABLE; + + SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl); + + delay_interval = timeout / 100; + + wait_loop = SXE_MAX_RX_DESC_POLL; + wait_delay = delay_interval; + + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx)); + + if (!(rxdctl & SXE_RXDCTL_ENABLE)) + return; + } + + LOG_MSG_ERR(drv, "register RXDCTL.ENABLE not cleared within the polling period\n"); +} + +static u32 sxe_hw_tx_dbu_fc_status_get(struct sxe_hw *hw) +{ + return SXE_REG_READ(hw, SXE_TXPBFCS); +} + +static void sxe_hw_fnav_sample_hash_set(struct sxe_hw *hw, u64 hash) +{ + SXE_REG64_WRITE(hw, SXE_FNAVHASH, hash); + return; +} + +static const struct sxe_dbu_operations sxe_dbu_ops = { + .rx_pkt_buf_size_configure = sxe_hw_rx_pkt_buf_size_configure, + .rx_pkt_buf_switch = sxe_hw_rx_pkt_buf_switch, + .rx_multi_ring_configure = sxe_hw_rx_multi_ring_configure, + .rss_key_set_all = sxe_hw_rss_key_set_all, + .rss_redir_tbl_set_all = sxe_hw_rss_redir_tbl_set_all, + .rx_cap_switch_on = 
sxe_hw_rx_cap_switch_on, + .rx_cap_switch_off = sxe_hw_rx_cap_switch_off, + .rss_hash_pkt_type_set = sxe_hw_rss_hash_pkt_type_set, + .rss_hash_pkt_type_update = sxe_hw_rss_hash_pkt_type_update, + .rss_rings_used_set = sxe_hw_rss_rings_used_set, + .lro_ack_switch = sxe_hw_rx_lro_ack_switch, + + .fnav_mode_init = sxe_hw_fnav_mode_init, + .fnav_specific_rule_mask_set = sxe_hw_fnav_specific_rule_mask_set, + .fnav_specific_rule_add = sxe_hw_fnav_specific_rule_add, + .fnav_specific_rule_del = sxe_hw_fnav_specific_rule_del, + .fnav_sample_hash_cmd_get = sxe_hw_fnav_sample_hash_cmd_get, + .fnav_sample_stats_reinit = sxe_hw_fnav_sample_stats_reinit, + .fnav_sample_hash_set = sxe_hw_fnav_sample_hash_set, + .fnav_single_sample_rule_del = sxe_hw_fnav_single_sample_rule_del, + + .tx_pkt_buf_switch = sxe_hw_tx_pkt_buf_switch, + .tx_pkt_buf_size_configure = sxe_hw_tx_pkt_buf_size_configure, + + .ptp_init = sxe_hw_ptp_init, + .ptp_freq_adjust = sxe_hw_ptp_freq_adjust, + .ptp_systime_init = sxe_hw_ptp_systime_init, + .ptp_systime_get = sxe_hw_ptp_systime_get, + .ptp_tx_timestamp_get = sxe_hw_ptp_tx_timestamp_get, + .ptp_timestamp_mode_set = sxe_hw_ptp_timestamp_mode_set, + .ptp_timestamp_enable = sxe_hw_ptp_timestamp_enable, + .ptp_rx_timestamp_clear = sxe_hw_ptp_rx_timestamp_clear, + .ptp_rx_timestamp_get = sxe_hw_ptp_rx_timestamp_get, + .ptp_is_rx_timestamp_valid = sxe_hw_ptp_is_rx_timestamp_valid, + + .dcb_tc_rss_configure = sxe_hw_dcb_tc_rss_configure, + .vf_rx_switch = sxe_hw_vf_rx_switch, + .rx_pkt_buf_size_get = sxe_hw_rx_pkt_buf_size_get, + .rx_func_switch_on = sxe_hw_rx_func_switch_on, + + .tx_ring_disable = sxe_hw_tx_ring_disable, + .rx_ring_disable = sxe_hw_rx_ring_disable, + + .tx_dbu_fc_status_get = sxe_hw_tx_dbu_fc_status_get, +}; + + +void sxe_hw_rx_dma_ctrl_init(struct sxe_hw *hw) +{ + u32 rx_dma_ctrl = SXE_REG_READ(hw, SXE_RDRXCTL); + + rx_dma_ctrl &= ~SXE_RDRXCTL_LROFRSTSIZE; + SXE_REG_WRITE(hw, SXE_RDRXCTL, rx_dma_ctrl); + return; +} + +void sxe_hw_rx_dma_lro_ctrl_set(struct sxe_hw *hw) +{ + u32 rx_dma_ctrl = SXE_REG_READ(hw, SXE_RDRXCTL); + + rx_dma_ctrl |= SXE_RDRXCTL_LROACKC; + SXE_REG_WRITE(hw, SXE_RDRXCTL, rx_dma_ctrl); + return; +} + +void sxe_hw_rx_desc_thresh_set(struct sxe_hw *hw, u8 reg_idx) +{ + u32 rxdctl; + rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx)); + rxdctl |= 0x40 << SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT; + rxdctl |= 0x2 << SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT; + rxdctl |= 0x10; + SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl); + + return; +} + +void sxe_hw_rx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on) +{ + u32 rxdctl; + u32 wait_loop = SXE_RING_WAIT_LOOP; + struct sxe_adapter *adapter = hw->adapter; + + rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx)); + if (is_on) { + rxdctl |= SXE_RXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl); + + do { + usleep_range(1000, 2000); + rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx)); + } while (--wait_loop && !(rxdctl & SXE_RXDCTL_ENABLE)); + } else { + rxdctl &= ~SXE_RXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl); + + do { + usleep_range(1000, 2000); + rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx)); + } while (--wait_loop && (rxdctl & SXE_RXDCTL_ENABLE)); + } + + SXE_WRITE_FLUSH(hw); + + if (!wait_loop) { + LOG_MSG_ERR(drv, "rx ring %u switch %u failed within " + "the polling period\n", reg_idx, is_on); + } + + return; +} + +void sxe_hw_rx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on) +{ + u32 rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_idx)); + if (is_on) { + rxdctl |= 
SXE_RXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl); + } else { + rxdctl &= ~SXE_RXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_RXDCTL(reg_idx), rxdctl); + } + + SXE_WRITE_FLUSH(hw); + + return; +} + +void sxe_hw_rx_queue_desc_reg_configure(struct sxe_hw *hw, + u8 reg_idx, u32 rdh_value, + u32 rdt_value) +{ + SXE_REG_WRITE(hw, SXE_RDH(reg_idx), rdh_value); + SXE_REG_WRITE(hw, SXE_RDT(reg_idx), rdt_value); + return; +} + +static void sxe_hw_rx_ring_head_init(struct sxe_hw *hw, u8 reg_idx) +{ + SXE_REG_WRITE(hw, SXE_RDH(reg_idx), 0); + + return; +} + +static void sxe_hw_rx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx) +{ + SXE_REG_WRITE(hw, SXE_RDT(reg_idx), 0); + + return; +} + +void sxe_hw_rx_ring_desc_configure(struct sxe_hw *hw, + u32 desc_mem_len, u64 desc_dma_addr, + u8 reg_idx) +{ + SXE_REG_WRITE(hw, SXE_RDBAL(reg_idx), + (desc_dma_addr & DMA_BIT_MASK(32))); + SXE_REG_WRITE(hw, SXE_RDBAH(reg_idx), (desc_dma_addr >> 32)); + SXE_REG_WRITE(hw, SXE_RDLEN(reg_idx), desc_mem_len); + + SXE_WRITE_FLUSH(hw); + + sxe_hw_rx_ring_head_init(hw, reg_idx); + sxe_hw_rx_ring_tail_init(hw, reg_idx); + + return; +} + +void sxe_hw_rx_rcv_ctl_configure(struct sxe_hw *hw, u8 reg_idx, + u32 header_buf_len, u32 pkg_buf_len + ) +{ + u32 srrctl; + + srrctl = ((header_buf_len << SXE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + SXE_SRRCTL_BSIZEHDR_MASK); + srrctl |= ((pkg_buf_len >> SXE_SRRCTL_BSIZEPKT_SHIFT) & + SXE_SRRCTL_BSIZEPKT_MASK); + + SXE_REG_WRITE(hw, SXE_SRRCTL(reg_idx), srrctl); + + return; +} + +void sxe_hw_rx_lro_ctl_configure(struct sxe_hw *hw, + u8 reg_idx, u32 max_desc) +{ + u32 lroctrl; + lroctrl = SXE_REG_READ(hw, SXE_LROCTL(reg_idx)); + lroctrl |= SXE_LROCTL_LROEN; + lroctrl |= max_desc; + SXE_REG_WRITE(hw, SXE_LROCTL(reg_idx), lroctrl); + + return; +} + +static u32 sxe_hw_rx_desc_ctrl_get(struct sxe_hw *hw, u8 reg_idx) +{ + return SXE_REG_READ(hw, SXE_RXDCTL(reg_idx)); +} + +static void sxe_hw_dcb_arbiter_set(struct sxe_hw *hw, bool is_enable) +{ + u32 rttdcs; + + rttdcs = SXE_REG_READ(hw, SXE_RTTDCS); + + if (true == is_enable) { + rttdcs &= ~SXE_RTTDCS_ARBDIS; + rttdcs &= ~SXE_RTTDCS_BPBFSM; + + SXE_REG_WRITE(hw, SXE_RTTDCS, rttdcs); + } else { + rttdcs |= SXE_RTTDCS_ARBDIS; + SXE_REG_WRITE(hw, SXE_RTTDCS, rttdcs); + } + + return; +} + + +static void sxe_hw_tx_multi_ring_configure(struct sxe_hw *hw, u8 tcs, + u16 pool_mask, bool sriov_enable, u16 max_txq) +{ + u32 mtqc; + + sxe_hw_dcb_arbiter_set(hw, false); + + if (true == sriov_enable) { + mtqc = SXE_MTQC_VT_ENA; + if (tcs > SXE_DCB_4_TC) + mtqc |= SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ; + else if (tcs > SXE_DCB_1_TC) + mtqc |= SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ; + else if (pool_mask == SXE_4Q_PER_POOL_MASK) + mtqc |= SXE_MTQC_32VF; + else + mtqc |= SXE_MTQC_64VF; + } else { + if (tcs > SXE_DCB_4_TC) { + mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ; + } else if (tcs > SXE_DCB_1_TC) { + mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ; + } else { + if (max_txq > 63) { + mtqc = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ; + } else { + mtqc = SXE_MTQC_64Q_1PB; + } + } + } + + SXE_REG_WRITE(hw, SXE_MTQC, mtqc); + + sxe_hw_dcb_arbiter_set(hw, true); + + return; +} + +void sxe_hw_tx_ring_head_init(struct sxe_hw *hw, u8 reg_idx) +{ + SXE_REG_WRITE(hw, SXE_TDH(reg_idx), 0); + + return; +} + +void sxe_hw_tx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx) +{ + SXE_REG_WRITE(hw, SXE_TDT(reg_idx), 0); + + return; +} + +void sxe_hw_tx_ring_desc_configure(struct sxe_hw *hw, + u32 desc_mem_len, + u64 desc_dma_addr, u8 reg_idx) +{ + SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), 0); + + 
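+	/* queue stays disabled while the descriptor base and length registers are programmed */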
SXE_WRITE_FLUSH(hw); + + SXE_REG_WRITE(hw, SXE_TDBAL(reg_idx), (desc_dma_addr & \ + DMA_BIT_MASK(32))); + SXE_REG_WRITE(hw, SXE_TDBAH(reg_idx), (desc_dma_addr >> 32)); + SXE_REG_WRITE(hw, SXE_TDLEN(reg_idx), desc_mem_len); + sxe_hw_tx_ring_head_init(hw, reg_idx); + sxe_hw_tx_ring_tail_init(hw, reg_idx); + + return; +} + +void sxe_hw_tx_desc_thresh_set( + struct sxe_hw *hw, + u8 reg_idx, + u32 wb_thresh, + u32 host_thresh, + u32 prefech_thresh) +{ + u32 txdctl = 0; + + txdctl |= (wb_thresh << SXE_TXDCTL_WTHRESH_SHIFT); + txdctl |= (host_thresh << SXE_TXDCTL_HTHRESH_SHIFT) | prefech_thresh; + + SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl); + + return; +} + +void sxe_hw_all_ring_disable(struct sxe_hw *hw, u32 ring_max) +{ + u32 i, value; + + for (i = 0; i < ring_max; i++) { + value = SXE_REG_READ(hw, SXE_TXDCTL(i)); + value &= ~SXE_TXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_TXDCTL(i), value); + + value = SXE_REG_READ(hw, SXE_RXDCTL(i)); + value &= ~SXE_RXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_RXDCTL(i), value); + } + + SXE_WRITE_FLUSH(hw); + usleep_range(1000, 2000); + + return; +} + +void sxe_hw_tx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on) +{ + u32 wait_loop = SXE_RING_WAIT_LOOP; + struct sxe_adapter *adapter = hw->adapter; + + u32 txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx)); + if (is_on) { + txdctl |= SXE_TXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl); + + do { + usleep_range(1000, 2000); + txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx)); + } while (--wait_loop && !(txdctl & SXE_TXDCTL_ENABLE)); + } else { + txdctl &= ~SXE_TXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl); + + do { + usleep_range(1000, 2000); + txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx)); + } while (--wait_loop && (txdctl & SXE_TXDCTL_ENABLE)); + } + + if (!wait_loop) { + LOG_DEV_ERR("tx ring %u switch %u failed within " + "the polling period\n", reg_idx, is_on); + } + + return; +} + +void sxe_hw_tx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on) +{ + u32 txdctl = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx)); + if (is_on) { + txdctl |= SXE_TXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl); + } else { + txdctl &= ~SXE_TXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), txdctl); + } + + return; +} + +void sxe_hw_tx_pkt_buf_thresh_configure(struct sxe_hw *hw, + u8 num_pb, bool dcb_enable) +{ + u32 i, tx_pkt_size, tx_pb_thresh; + + if (!num_pb){ + num_pb = 1; + } + + tx_pkt_size = SXE_TX_PBSIZE_MAX / num_pb; + if (true == dcb_enable) { + tx_pb_thresh = (tx_pkt_size / 1024) - SXE_TX_PKT_SIZE_MAX; + } else { + tx_pb_thresh = (tx_pkt_size / 1024) - SXE_NODCB_TX_PKT_SIZE_MAX; + } + + for (i = 0; i < num_pb; i++) { + SXE_REG_WRITE(hw, SXE_TXPBTHRESH(i), tx_pb_thresh); + } + + for (; i < SXE_PKG_BUF_NUM_MAX; i++) { + SXE_REG_WRITE(hw, SXE_TXPBTHRESH(i), 0); + } + + return; +} + +void sxe_hw_tx_enable(struct sxe_hw *hw) +{ + u32 ctl; + + ctl = SXE_REG_READ(hw, SXE_DMATXCTL); + ctl |= SXE_DMATXCTL_TE; + SXE_REG_WRITE(hw, SXE_DMATXCTL, ctl); + + return; +} + +static u32 sxe_hw_tx_desc_ctrl_get(struct sxe_hw *hw, u8 reg_idx) +{ + return SXE_REG_READ(hw, SXE_TXDCTL(reg_idx)); +} + +static void sxe_hw_tx_desc_wb_thresh_clear(struct sxe_hw *hw, u8 reg_idx) +{ + u32 reg_data; + + reg_data = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx)); + reg_data &= ~SXE_TXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg_data); + SXE_WRITE_FLUSH(hw); + reg_data &= ~(0x7f<<16); + reg_data |= SXE_TXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg_data); + + return; +} + +void 
sxe_hw_vlan_tag_strip_switch(struct sxe_hw *hw, + u16 reg_index, bool is_enable) +{ + u32 rxdctl; + + rxdctl = SXE_REG_READ(hw, SXE_RXDCTL(reg_index)); + + if (is_enable) { + rxdctl |= SXE_RXDCTL_VME; + } else { + rxdctl &= ~SXE_RXDCTL_VME; + } + + SXE_REG_WRITE(hw, SXE_RXDCTL(reg_index), rxdctl); + + return; +} + +static void sxe_hw_tx_vlan_tag_set(struct sxe_hw *hw, + u16 vid, u16 qos, u32 vf) +{ + u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | SXE_VMVIR_VLANA_DEFAULT; + + SXE_REG_WRITE(hw, SXE_VMVIR(vf), vmvir); + return; +} + +void sxe_hw_tx_vlan_tag_clear(struct sxe_hw *hw, u32 vf) +{ + SXE_REG_WRITE(hw, SXE_VMVIR(vf), 0); + return; +} + +u32 sxe_hw_tx_vlan_insert_get(struct sxe_hw *hw, u32 vf) +{ + return SXE_REG_READ(hw, SXE_VMVIR(vf)); +} + +void sxe_hw_tx_ring_info_get(struct sxe_hw *hw, + u8 idx, u32 *head, u32 *tail) +{ + *head = SXE_REG_READ(hw, SXE_TDH(idx)); + *tail = SXE_REG_READ(hw, SXE_TDT(idx)); + + return; +} + +void sxe_hw_dcb_rx_bw_alloc_configure(struct sxe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc, + u8 max_priority) +{ + u32 reg; + u32 credit_refill; + u32 credit_max; + u8 i; + + reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC | SXE_RTRPCS_ARBDIS; + SXE_REG_WRITE(hw, SXE_RTRPCS, reg); + + reg = 0; + for (i = 0; i < max_priority; i++) { + reg |= (prio_tc[i] << (i * SXE_RTRUP2TC_UP_SHIFT)); + } + + SXE_REG_WRITE(hw, SXE_RTRUP2TC, reg); + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; + credit_max = max[i]; + reg = credit_refill | (credit_max << SXE_RTRPT4C_MCL_SHIFT); + + reg |= (u32)(bwg_id[i]) << SXE_RTRPT4C_BWG_SHIFT; + + if (prio_type[i] == PRIO_LINK) { + reg |= SXE_RTRPT4C_LSP; + } + + SXE_REG_WRITE(hw, SXE_RTRPT4C(i), reg); + } + + reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC; + SXE_REG_WRITE(hw, SXE_RTRPCS, reg); + + return; +} + +void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) +{ + u32 reg, max_credits; + u8 i; + + for (i = 0; i < 128; i++) { + SXE_REG_WRITE(hw, SXE_RTTDQSEL, i); + SXE_REG_WRITE(hw, SXE_RTTDT1C, 0); + } + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << SXE_RTTDT2C_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << SXE_RTTDT2C_BWG_SHIFT; + + if (prio_type[i] == PRIO_GROUP) { + reg |= SXE_RTTDT2C_GSP; + } + + if (prio_type[i] == PRIO_LINK) { + reg |= SXE_RTTDT2C_LSP; + } + + SXE_REG_WRITE(hw, SXE_RTTDT2C(i), reg); + } + + reg = SXE_RTTDCS_TDPAC | SXE_RTTDCS_TDRM; + SXE_REG_WRITE(hw, SXE_RTTDCS, reg); + + return; +} + +void sxe_hw_dcb_tx_data_bw_alloc_configure(struct sxe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc, + u8 max_priority) +{ + u32 reg; + u8 i; + + reg = SXE_RTTPCS_TPPAC | SXE_RTTPCS_TPRM | + (SXE_RTTPCS_ARBD_DCB << SXE_RTTPCS_ARBD_SHIFT) | + SXE_RTTPCS_ARBDIS; + SXE_REG_WRITE(hw, SXE_RTTPCS, reg); + + reg = 0; + for (i = 0; i < max_priority; i++) { + reg |= (prio_tc[i] << (i * SXE_RTTUP2TC_UP_SHIFT)); + } + + SXE_REG_WRITE(hw, SXE_RTTUP2TC, reg); + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << SXE_RTTPT2C_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << SXE_RTTPT2C_BWG_SHIFT; + + if (prio_type[i] == PRIO_GROUP) { + reg |= SXE_RTTPT2C_GSP; + } + + if (prio_type[i] == PRIO_LINK) { + reg |= SXE_RTTPT2C_LSP; + } + + SXE_REG_WRITE(hw, SXE_RTTPT2C(i), reg); + } + + reg = SXE_RTTPCS_TPPAC | SXE_RTTPCS_TPRM | + (SXE_RTTPCS_ARBD_DCB << SXE_RTTPCS_ARBD_SHIFT); + SXE_REG_WRITE(hw, SXE_RTTPCS, reg); + + return; 
+} + +void sxe_hw_dcb_pfc_configure(struct sxe_hw *hw, + u8 pfc_en, u8 *prio_tc, + u8 max_priority) +{ + u32 i, j, fcrtl, reg; + u8 max_tc = 0; + u32 reg_val; + + reg_val = SXE_REG_READ(hw, SXE_FLCTRL); + + reg_val &= ~SXE_FCTRL_TFCE_MASK; + reg_val |= SXE_FCTRL_TFCE_PFC_EN; + + reg_val |= SXE_FCTRL_TFCE_DPF_EN; + + reg_val &= ~(SXE_FCTRL_TFCE_FCEN_MASK | SXE_FCTRL_TFCE_XONE_MASK); + reg_val |= (pfc_en << 16) & SXE_FCTRL_TFCE_FCEN_MASK; + reg_val |= (pfc_en << 24) & SXE_FCTRL_TFCE_XONE_MASK; + + reg_val &= ~SXE_FCTRL_RFCE_MASK; + reg_val |= SXE_FCTRL_RFCE_PFC_EN; + SXE_REG_WRITE(hw, SXE_FLCTRL, reg_val); + + reg_val = SXE_REG_READ(hw, SXE_PFCTOP); + reg_val &= ~SXE_PFCTOP_FCOP_MASK; + reg_val |= SXE_PFCTOP_FCT; + reg_val |= SXE_PFCTOP_FCOP_PFC; + SXE_REG_WRITE(hw, SXE_PFCTOP, reg_val); + + for (i = 0; i < max_priority; i++) { + if (prio_tc[i] > max_tc) { + max_tc = prio_tc[i]; + } + } + + for (i = 0; i <= max_tc; i++) { + int enabled = 0; + + for (j = 0; j < max_priority; j++) { + if ((prio_tc[j] == i) && (pfc_en & BIT(j))) { + enabled = 1; + break; + } + } + + if (enabled) { + reg = (hw->fc.high_water[i] << 9) | SXE_FCRTH_FCEN; + fcrtl = (hw->fc.low_water[i] << 9) | SXE_FCRTL_XONE; + SXE_REG_WRITE(hw, SXE_FCRTL(i), fcrtl); + } else { + + reg = (SXE_REG_READ(hw, SXE_RXPBSIZE(i)) - 24576) >> 1; + SXE_REG_WRITE(hw, SXE_FCRTL(i), 0); + } + + SXE_REG_WRITE(hw, SXE_FCRTH(i), reg); + } + + for (; i < MAX_TRAFFIC_CLASS; i++) { + SXE_REG_WRITE(hw, SXE_FCRTL(i), 0); + SXE_REG_WRITE(hw, SXE_FCRTH(i), 0); + } + + reg = hw->fc.pause_time * 0x00010001; + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) { + SXE_REG_WRITE(hw, SXE_FCTTV(i), reg); + } + + SXE_REG_WRITE(hw, SXE_FCRTV, hw->fc.pause_time / 2); + + return; +} + +static void sxe_hw_dcb_8tc_vmdq_off_stats_configure(struct sxe_hw *hw) +{ + u32 reg; + u8 i; + + for (i = 0; i < 32; i++) { + reg = 0x01010101 * (i / 4); + SXE_REG_WRITE(hw, SXE_RQSMR(i), reg); + } + + for (i = 0; i < 32; i++) { + if (i < 8) { + reg = 0x00000000; + } else if (i < 16) { + reg = 0x01010101; + } else if (i < 20) { + reg = 0x02020202; + } else if (i < 24) { + reg = 0x03030303; + } else if (i < 26) { + reg = 0x04040404; + } else if (i < 28) { + reg = 0x05050505; + } else if (i < 30) { + reg = 0x06060606; + } else { + reg = 0x07070707; + } + + SXE_REG_WRITE(hw, SXE_TQSM(i), reg); + } + + return; +} + +static void sxe_hw_dcb_rx_up_tc_map_set(struct sxe_hw *hw, u8 tc) +{ + u8 i; + u32 reg, rsave; + + reg = SXE_REG_READ(hw, SXE_RTRUP2TC); + rsave = reg; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + u8 up2tc = reg >> (i * SXE_RTRUP2TC_UP_SHIFT); + + if (up2tc > tc) { + reg &= ~(0x7 << SXE_RTRUP2TC_UP_MASK); + } + } + + if (reg != rsave) { + SXE_REG_WRITE(hw, SXE_RTRUP2TC, reg); + } + + return; +} + +void sxe_hw_vt_pool_loopback_switch(struct sxe_hw *hw, + bool is_enable) +{ + if (true == is_enable) { + SXE_REG_WRITE(hw, SXE_PFDTXGSWC, SXE_PFDTXGSWC_VT_LBEN); + } else { + SXE_REG_WRITE(hw, SXE_PFDTXGSWC, 0); + } + + return; +} + +void sxe_hw_pool_rx_ring_drop_enable(struct sxe_hw *hw, u8 vf_idx, + u16 pf_vlan, u8 ring_per_pool) +{ + u32 qde = SXE_QDE_ENABLE; + u8 i; + + if (pf_vlan) { + qde |= SXE_QDE_HIDE_VLAN; + } + + for (i = (vf_idx * ring_per_pool); i < ((vf_idx + 1) * ring_per_pool); i++) + { + u32 value; + + SXE_WRITE_FLUSH(hw); + + value = i << SXE_QDE_IDX_SHIFT; + value |= qde | SXE_QDE_WRITE; + + SXE_REG_WRITE(hw, SXE_QDE, value); + } + + return; +} + +u32 sxe_hw_rx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx) +{ + return SXE_REG_READ(hw, SXE_VFRE(reg_idx)); +} + +void 
sxe_hw_rx_pool_bitmap_set(struct sxe_hw *hw, + u8 reg_idx, u32 bitmap) +{ + SXE_REG_WRITE(hw, SXE_VFRE(reg_idx), bitmap); + + return; +} + +u32 sxe_hw_tx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx) +{ + return SXE_REG_READ(hw, SXE_VFTE(reg_idx)); +} + +void sxe_hw_tx_pool_bitmap_set(struct sxe_hw *hw, + u8 reg_idx, u32 bitmap) +{ + SXE_REG_WRITE(hw, SXE_VFTE(reg_idx), bitmap); + + return; +} + +void sxe_hw_dcb_max_mem_window_set(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_RTTBCNRM, value); + + return; +} + +void sxe_hw_dcb_tx_ring_rate_factor_set(struct sxe_hw *hw, + u32 ring_idx, u32 rate) +{ + SXE_REG_WRITE(hw, SXE_RTTDQSEL, ring_idx); + SXE_REG_WRITE(hw, SXE_RTTBCNRC, rate); + + return; +} + +void sxe_hw_spoof_count_enable(struct sxe_hw *hw, + u8 reg_idx, u8 bit_index) +{ + u32 value = SXE_REG_READ(hw, SXE_VMECM(reg_idx)); + + value |= BIT(bit_index); + + SXE_REG_WRITE(hw, SXE_VMECM(reg_idx), value); + + return; +} + +void sxe_hw_pool_mac_anti_spoof_set(struct sxe_hw *hw, + u8 vf_idx, bool status) +{ + u8 reg_index = vf_idx >> 3; + u8 bit_index = vf_idx % 8; + u32 value; + + value = SXE_REG_READ(hw, SXE_SPOOF(reg_index)); + + if (status) { + value |= BIT(bit_index); + } else { + value &= ~BIT(bit_index); + } + + SXE_REG_WRITE(hw, SXE_SPOOF(reg_index), value); + + return; +} + +static void sxe_hw_dcb_rx_up_tc_map_get(struct sxe_hw *hw, u8 *map) +{ + u32 reg, i; + + reg = SXE_REG_READ(hw, SXE_RTRUP2TC); + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + map[i] = SXE_RTRUP2TC_UP_MASK & + (reg >> (i * SXE_RTRUP2TC_UP_SHIFT)); + } + + return; +} + +void sxe_hw_rx_drop_switch(struct sxe_hw *hw, u8 idx, bool is_enable) +{ + u32 srrctl = SXE_REG_READ(hw, SXE_SRRCTL(idx)); + + if (true == is_enable) { + srrctl |= SXE_SRRCTL_DROP_EN; + } else { + srrctl &= ~SXE_SRRCTL_DROP_EN; + } + + SXE_REG_WRITE(hw, SXE_SRRCTL(idx), srrctl); + + return; +} + +static void sxe_hw_pool_vlan_anti_spoof_set(struct sxe_hw *hw, + u8 vf_idx, bool status) +{ + u8 reg_index = vf_idx >> 3; + u8 bit_index = (vf_idx % 8) + SXE_SPOOF_VLAN_SHIFT; + u32 value; + + value = SXE_REG_READ(hw, SXE_SPOOF(reg_index)); + + if (status) { + value |= BIT(bit_index); + } else { + value &= ~BIT(bit_index); + } + + SXE_REG_WRITE(hw, SXE_SPOOF(reg_index), value); + + return; +} + +static void sxe_hw_vf_tx_desc_addr_clear(struct sxe_hw *hw, + u8 vf_idx, u8 ring_per_pool) +{ + u8 i; + + for (i = 0; i < ring_per_pool; i++) { + SXE_REG_WRITE(hw, SXE_PVFTDWBAL_N(ring_per_pool, vf_idx, i), 0); + SXE_REG_WRITE(hw, SXE_PVFTDWBAH_N(ring_per_pool, vf_idx, i), 0); + } + + return; +} + +static void sxe_hw_vf_tx_ring_disable(struct sxe_hw *hw, + u8 ring_per_pool, u8 vf_idx) +{ + u32 ring_idx; + u32 reg; + + for (ring_idx = 0; ring_idx < ring_per_pool; ring_idx++) { + u32 reg_idx = vf_idx * ring_per_pool + ring_idx; + reg = SXE_REG_READ(hw, SXE_TXDCTL(reg_idx)); + if (reg) { + reg |= SXE_TXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg); + reg &= ~SXE_TXDCTL_ENABLE; + SXE_REG_WRITE(hw, SXE_TXDCTL(reg_idx), reg); + } + } + + SXE_WRITE_FLUSH(hw); + + return; +} + +void sxe_hw_dcb_rate_limiter_clear(struct sxe_hw *hw, u8 ring_max) +{ + u32 i; + + for (i = 0; i < ring_max; i++) { + SXE_REG_WRITE(hw, SXE_RTTDQSEL, i); + SXE_REG_WRITE(hw, SXE_RTTBCNRC, 0); + } + SXE_WRITE_FLUSH(hw); + + return; +} + +static void sxe_hw_tx_tph_update(struct sxe_hw *hw, u8 ring_idx, u8 cpu) +{ + u32 value = cpu; + + value <<= SXE_TPH_TXCTRL_CPUID_SHIFT; + + value |= SXE_TPH_TXCTRL_DESC_RRO_EN | \ + SXE_TPH_TXCTRL_DATA_RRO_EN | \ + 
SXE_TPH_TXCTRL_DESC_TPH_EN; + + SXE_REG_WRITE(hw, SXE_TPH_TXCTRL(ring_idx), value); + return; +} + +static void sxe_hw_rx_tph_update(struct sxe_hw *hw, u8 ring_idx, u8 cpu) +{ + u32 value = cpu; + + value <<= SXE_TPH_RXCTRL_CPUID_SHIFT; + + value |= SXE_TPH_RXCTRL_DESC_RRO_EN | \ + SXE_TPH_RXCTRL_DATA_TPH_EN | \ + SXE_TPH_RXCTRL_DESC_TPH_EN; + + SXE_REG_WRITE(hw, SXE_TPH_RXCTRL(ring_idx), value); + return; +} + +static void sxe_hw_tph_switch(struct sxe_hw *hw, bool is_enable) +{ + if (is_enable == true) { + SXE_REG_WRITE(hw, SXE_TPH_CTRL, SXE_TPH_CTRL_MODE_CB2); + } else { + SXE_REG_WRITE(hw, SXE_TPH_CTRL, SXE_TPH_CTRL_DISABLE); + } + + return; +} + +static const struct sxe_dma_operations sxe_dma_ops = { + .rx_dma_ctrl_init = sxe_hw_rx_dma_ctrl_init, + .rx_ring_switch = sxe_hw_rx_ring_switch, + .rx_ring_switch_not_polling = sxe_hw_rx_ring_switch_not_polling, + .rx_ring_desc_configure = sxe_hw_rx_ring_desc_configure, + .rx_desc_thresh_set = sxe_hw_rx_desc_thresh_set, + .rx_rcv_ctl_configure = sxe_hw_rx_rcv_ctl_configure, + .rx_lro_ctl_configure = sxe_hw_rx_lro_ctl_configure, + .rx_desc_ctrl_get = sxe_hw_rx_desc_ctrl_get, + .rx_dma_lro_ctl_set = sxe_hw_rx_dma_lro_ctrl_set, + .rx_drop_switch = sxe_hw_rx_drop_switch, + .pool_rx_ring_drop_enable = sxe_hw_pool_rx_ring_drop_enable, + .rx_tph_update = sxe_hw_rx_tph_update, + + .tx_enable = sxe_hw_tx_enable, + .tx_multi_ring_configure = sxe_hw_tx_multi_ring_configure, + .tx_ring_desc_configure = sxe_hw_tx_ring_desc_configure, + .tx_desc_thresh_set = sxe_hw_tx_desc_thresh_set, + .tx_desc_wb_thresh_clear = sxe_hw_tx_desc_wb_thresh_clear, + .tx_ring_switch = sxe_hw_tx_ring_switch, + .tx_ring_switch_not_polling = sxe_hw_tx_ring_switch_not_polling, + .tx_pkt_buf_thresh_configure = sxe_hw_tx_pkt_buf_thresh_configure, + .tx_desc_ctrl_get = sxe_hw_tx_desc_ctrl_get, + .tx_ring_info_get = sxe_hw_tx_ring_info_get, + .tx_tph_update = sxe_hw_tx_tph_update, + + .tph_switch = sxe_hw_tph_switch, + + .vlan_tag_strip_switch = sxe_hw_vlan_tag_strip_switch, + .tx_vlan_tag_set = sxe_hw_tx_vlan_tag_set, + .tx_vlan_tag_clear = sxe_hw_tx_vlan_tag_clear, + + .dcb_rx_bw_alloc_configure = sxe_hw_dcb_rx_bw_alloc_configure, + .dcb_tx_desc_bw_alloc_configure = sxe_hw_dcb_tx_desc_bw_alloc_configure, + .dcb_tx_data_bw_alloc_configure = sxe_hw_dcb_tx_data_bw_alloc_configure, + .dcb_pfc_configure = sxe_hw_dcb_pfc_configure, + .dcb_tc_stats_configure = sxe_hw_dcb_8tc_vmdq_off_stats_configure, + .dcb_rx_up_tc_map_set = sxe_hw_dcb_rx_up_tc_map_set, + .dcb_rx_up_tc_map_get = sxe_hw_dcb_rx_up_tc_map_get, + .dcb_rate_limiter_clear = sxe_hw_dcb_rate_limiter_clear, + .dcb_tx_ring_rate_factor_set = sxe_hw_dcb_tx_ring_rate_factor_set, + + .vt_pool_loopback_switch = sxe_hw_vt_pool_loopback_switch, + .rx_pool_get = sxe_hw_rx_pool_bitmap_get, + .rx_pool_set = sxe_hw_rx_pool_bitmap_set, + .tx_pool_get = sxe_hw_tx_pool_bitmap_get, + .tx_pool_set = sxe_hw_tx_pool_bitmap_set, + + .vf_tx_desc_addr_clear = sxe_hw_vf_tx_desc_addr_clear, + .pool_mac_anti_spoof_set = sxe_hw_pool_mac_anti_spoof_set, + .pool_vlan_anti_spoof_set = sxe_hw_pool_vlan_anti_spoof_set, + + .max_dcb_memory_window_set = sxe_hw_dcb_max_mem_window_set, + .spoof_count_enable = sxe_hw_spoof_count_enable, + + .vf_tx_ring_disable = sxe_hw_vf_tx_ring_disable, + .all_ring_disable = sxe_hw_all_ring_disable, + .tx_ring_tail_init = sxe_hw_tx_ring_tail_init, +}; + + +#ifdef SXE_IPSEC_CONFIGURE + +static void sxe_hw_ipsec_rx_sa_load(struct sxe_hw *hw, u16 idx, + u8 type) +{ + u32 reg = SXE_REG_READ(hw, SXE_IPSRXIDX); + + reg &= 
SXE_RXTXIDX_IPS_EN; + reg |= type << SXE_RXIDX_TBL_SHIFT | + idx << SXE_RXTXIDX_IDX_SHIFT | + SXE_RXTXIDX_WRITE; + SXE_REG_WRITE(hw, SXE_IPSRXIDX, reg); + SXE_WRITE_FLUSH(hw); + + return; +} + +static void sxe_hw_ipsec_rx_ip_store(struct sxe_hw *hw, + __be32 *ip_addr, u8 ip_len, u8 ip_idx) +{ + u8 i; + + for (i = 0; i < ip_len; i++) { + SXE_REG_WRITE(hw, SXE_IPSRXIPADDR(i), + (__force u32)cpu_to_le32((__force u32)ip_addr[i])); + } + SXE_WRITE_FLUSH(hw); + sxe_hw_ipsec_rx_sa_load(hw, ip_idx, SXE_IPSEC_IP_TABLE); + + return; +} + +static void sxe_hw_ipsec_rx_spi_store(struct sxe_hw *hw, + __be32 spi, u8 ip_idx, u16 sa_idx) +{ + SXE_REG_WRITE(hw, SXE_IPSRXSPI, (__force u32)cpu_to_le32((__force u32)spi)); + + SXE_REG_WRITE(hw, SXE_IPSRXIPIDX, ip_idx); + + SXE_WRITE_FLUSH(hw); + + sxe_hw_ipsec_rx_sa_load(hw, sa_idx, SXE_IPSEC_SPI_TABLE); + + return; +} + +static void sxe_hw_ipsec_rx_key_store(struct sxe_hw *hw, + u32 *key, u8 key_len, u32 salt, u32 mode, u16 sa_idx) +{ + u8 i; + + for (i = 0; i < key_len; i++) { + SXE_REG_WRITE(hw, SXE_IPSRXKEY(i), + (__force u32)cpu_to_be32(key[(key_len - 1) - i])); + } + + SXE_REG_WRITE(hw, SXE_IPSRXSALT, (__force u32)cpu_to_be32(salt)); + SXE_REG_WRITE(hw, SXE_IPSRXMOD, mode); + SXE_WRITE_FLUSH(hw); + + sxe_hw_ipsec_rx_sa_load(hw, sa_idx, SXE_IPSEC_KEY_TABLE); + + return; +} + +static void sxe_hw_ipsec_tx_sa_load(struct sxe_hw *hw, u16 idx) +{ + u32 reg = SXE_REG_READ(hw, SXE_IPSTXIDX); + + reg &= SXE_RXTXIDX_IPS_EN; + reg |= idx << SXE_RXTXIDX_IDX_SHIFT | SXE_RXTXIDX_WRITE; + SXE_REG_WRITE(hw, SXE_IPSTXIDX, reg); + SXE_WRITE_FLUSH(hw); + + return; +} + +static void sxe_hw_ipsec_tx_key_store(struct sxe_hw *hw, u32 *key, + u8 key_len, u32 salt, u16 sa_idx) +{ + u8 i; + + for (i = 0; i < key_len; i++) { + SXE_REG_WRITE(hw, SXE_IPSTXKEY(i), + (__force u32)cpu_to_be32(key[(key_len - 1) - i])); + } + SXE_REG_WRITE(hw, SXE_IPSTXSALT, (__force u32)cpu_to_be32(salt)); + SXE_WRITE_FLUSH(hw); + + sxe_hw_ipsec_tx_sa_load(hw, sa_idx); + + return; +} + +static void sxe_hw_ipsec_sec_data_stop(struct sxe_hw *hw, bool is_linkup) +{ + u32 tx_empty, rx_empty; + u32 limit; + u32 reg; + + reg = SXE_REG_READ(hw, SXE_SECTXCTRL); + reg |= SXE_SECTXCTRL_TX_DIS; + SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg); + + reg = SXE_REG_READ(hw, SXE_SECRXCTRL); + reg |= SXE_SECRXCTRL_RX_DIS; + SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg); + + tx_empty = SXE_REG_READ(hw, SXE_SECTXSTAT) & SXE_SECTXSTAT_SECTX_RDY; + rx_empty = SXE_REG_READ(hw, SXE_SECRXSTAT) & SXE_SECRXSTAT_SECRX_RDY; + if (tx_empty && rx_empty) { + goto l_out; + } + + if (!is_linkup) { + SXE_REG_WRITE(hw, SXE_LPBKCTRL, SXE_LPBKCTRL_EN); + + SXE_WRITE_FLUSH(hw); + mdelay(3); + } + + limit = 20; + do { + mdelay(10); + tx_empty = SXE_REG_READ(hw, SXE_SECTXSTAT) & + SXE_SECTXSTAT_SECTX_RDY; + rx_empty = SXE_REG_READ(hw, SXE_SECRXSTAT) & + SXE_SECRXSTAT_SECRX_RDY; + } while (!(tx_empty && rx_empty) && limit--); + + if (!is_linkup) { + SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0); + + SXE_WRITE_FLUSH(hw); + } + +l_out: + return; +} + +static void sxe_hw_ipsec_engine_start(struct sxe_hw *hw, bool is_linkup) +{ + u32 reg; + + sxe_hw_ipsec_sec_data_stop(hw, is_linkup); + + reg = SXE_REG_READ(hw, SXE_SECTXMINIFG); + reg = (reg & 0xfffffff0) | 0x3; + SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg); + + reg = SXE_REG_READ(hw, SXE_SECTXBUFFAF); + reg = (reg & 0xfffffc00) | 0x15; + SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, reg); + + SXE_REG_WRITE(hw, SXE_SECRXCTRL, 0); + SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_STORE_FORWARD); + + SXE_REG_WRITE(hw, SXE_IPSTXIDX, 
SXE_RXTXIDX_IPS_EN); + SXE_REG_WRITE(hw, SXE_IPSRXIDX, SXE_RXTXIDX_IPS_EN); + + SXE_WRITE_FLUSH(hw); + + return; +} + +static void sxe_hw_ipsec_engine_stop(struct sxe_hw *hw, bool is_linkup) +{ + u32 reg; + + sxe_hw_ipsec_sec_data_stop(hw, is_linkup); + + SXE_REG_WRITE(hw, SXE_IPSTXIDX, 0); + SXE_REG_WRITE(hw, SXE_IPSRXIDX, 0); + + reg = SXE_REG_READ(hw, SXE_SECTXCTRL); + reg |= SXE_SECTXCTRL_SECTX_DIS; + reg &= ~SXE_SECTXCTRL_STORE_FORWARD; + SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg); + + reg = SXE_REG_READ(hw, SXE_SECRXCTRL); + reg |= SXE_SECRXCTRL_SECRX_DIS; + SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg); + + SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250); + + reg = SXE_REG_READ(hw, SXE_SECTXMINIFG); + reg = (reg & 0xfffffff0) | 0x1; + SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg); + + SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_SECTX_DIS); + SXE_REG_WRITE(hw, SXE_SECRXCTRL, SXE_SECRXCTRL_SECRX_DIS); + + SXE_WRITE_FLUSH(hw); + + return; +} + +bool sxe_hw_ipsec_offload_is_disable(struct sxe_hw *hw) +{ + u32 tx_dis = SXE_REG_READ(hw, SXE_SECTXSTAT); + u32 rx_dis = SXE_REG_READ(hw, SXE_SECRXSTAT); + bool ret = false; + + if ((tx_dis & SXE_SECTXSTAT_SECTX_OFF_DIS) || + (rx_dis & SXE_SECRXSTAT_SECRX_OFF_DIS)) { + ret = true; + } + + return ret; +} + +void sxe_hw_ipsec_sa_disable(struct sxe_hw *hw) +{ + SXE_REG_WRITE(hw, SXE_IPSRXIDX, 0); + SXE_REG_WRITE(hw, SXE_IPSTXIDX, 0); + + return; +} + +static const struct sxe_sec_operations sxe_sec_ops = { + .ipsec_rx_ip_store = sxe_hw_ipsec_rx_ip_store, + .ipsec_rx_spi_store = sxe_hw_ipsec_rx_spi_store, + .ipsec_rx_key_store = sxe_hw_ipsec_rx_key_store, + .ipsec_tx_key_store = sxe_hw_ipsec_tx_key_store, + .ipsec_sec_data_stop = sxe_hw_ipsec_sec_data_stop, + .ipsec_engine_start = sxe_hw_ipsec_engine_start, + .ipsec_engine_stop = sxe_hw_ipsec_engine_stop, + .ipsec_sa_disable = sxe_hw_ipsec_sa_disable, + .ipsec_offload_is_disable = sxe_hw_ipsec_offload_is_disable, +}; +#else +static const struct sxe_sec_operations sxe_sec_ops = { 0 }; +#endif + +void sxe_hw_stats_regs_clean(struct sxe_hw *hw) +{ + u16 i; + for (i = 0; i < 16; i++) { + SXE_REG_READ(hw, SXE_QPTC(i)); + SXE_REG_READ(hw, SXE_QPRC(i)); + SXE_REG_READ(hw, SXE_QBTC_H(i)); + SXE_REG_READ(hw, SXE_QBTC_L(i)); + SXE_REG_READ(hw, SXE_QBRC_H(i)); + SXE_REG_READ(hw, SXE_QBRC_L(i)); + SXE_REG_READ(hw, SXE_QPRDC(i)); + } + + SXE_REG_READ(hw, SXE_RXDGBCH); + SXE_REG_READ(hw, SXE_RXDGBCL); + SXE_REG_READ(hw, SXE_RXDGPC); + SXE_REG_READ(hw, SXE_TXDGPC); + SXE_REG_READ(hw, SXE_TXDGBCH); + SXE_REG_READ(hw, SXE_TXDGBCL); + SXE_REG_READ(hw, SXE_RXDDGPC); + SXE_REG_READ(hw, SXE_RXDDGBCH); + SXE_REG_READ(hw, SXE_RXDDGBCL); + SXE_REG_READ(hw, SXE_RXLPBKGPC); + SXE_REG_READ(hw, SXE_RXLPBKGBCH); + SXE_REG_READ(hw, SXE_RXLPBKGBCL); + SXE_REG_READ(hw, SXE_RXDLPBKGPC); + SXE_REG_READ(hw, SXE_RXDLPBKGBCH); + SXE_REG_READ(hw, SXE_RXDLPBKGBCL); + SXE_REG_READ(hw, SXE_RXTPCIN); + SXE_REG_READ(hw, SXE_RXTPCOUT); + SXE_REG_READ(hw, SXE_RXPRDDC); + SXE_REG_READ(hw, SXE_TXSWERR); + SXE_REG_READ(hw, SXE_TXSWITCH); + SXE_REG_READ(hw, SXE_TXREPEAT); + SXE_REG_READ(hw, SXE_TXDESCERR); + + SXE_REG_READ(hw, SXE_CRCERRS); + SXE_REG_READ(hw, SXE_ERRBC); + SXE_REG_READ(hw, SXE_RLEC); + SXE_REG_READ(hw, SXE_PRC64); + SXE_REG_READ(hw, SXE_PRC127); + SXE_REG_READ(hw, SXE_PRC255); + SXE_REG_READ(hw, SXE_PRC511); + SXE_REG_READ(hw, SXE_PRC1023); + SXE_REG_READ(hw, SXE_PRC1522); + SXE_REG_READ(hw, SXE_GPRC); + SXE_REG_READ(hw, SXE_BPRC); + SXE_REG_READ(hw, SXE_MPRC); + SXE_REG_READ(hw, SXE_GPTC); + SXE_REG_READ(hw, SXE_GORCL); + SXE_REG_READ(hw, SXE_GORCH); +
SXE_REG_READ(hw, SXE_GOTCL); + SXE_REG_READ(hw, SXE_GOTCH); + SXE_REG_READ(hw, SXE_RUC); + SXE_REG_READ(hw, SXE_RFC); + SXE_REG_READ(hw, SXE_ROC); + SXE_REG_READ(hw, SXE_RJC); + for (i = 0; i < 8; i++) { + SXE_REG_READ(hw, SXE_PRCPF(i)); + } + SXE_REG_READ(hw, SXE_TORL); + SXE_REG_READ(hw, SXE_TORH); + SXE_REG_READ(hw, SXE_TPR); + SXE_REG_READ(hw, SXE_TPT); + SXE_REG_READ(hw, SXE_PTC64); + SXE_REG_READ(hw, SXE_PTC127); + SXE_REG_READ(hw, SXE_PTC255); + SXE_REG_READ(hw, SXE_PTC511); + SXE_REG_READ(hw, SXE_PTC1023); + SXE_REG_READ(hw, SXE_PTC1522); + SXE_REG_READ(hw, SXE_MPTC); + SXE_REG_READ(hw, SXE_BPTC); + for (i = 0; i < 8; i++) { + SXE_REG_READ(hw, SXE_PFCT(i)); + } + + return; +} + +static void sxe_hw_stats_seq_get(struct sxe_hw *hw, struct sxe_mac_stats *stats) +{ + u8 i; + u64 tx_pfc_num = 0; +#ifdef SXE_DPDK + u64 gotch = 0; + u32 rycle_cnt = 10; +#endif + + for (i = 0; i < 8; i++) { + stats->prcpf[i] += SXE_REG_READ(hw,SXE_PRCPF(i)); + tx_pfc_num = SXE_REG_READ(hw,SXE_PFCT(i)); + stats->pfct[i] += tx_pfc_num; + stats->total_tx_pause += tx_pfc_num; + } + + stats->total_gptc += SXE_REG_READ(hw, SXE_GPTC); + stats->total_gotc += (SXE_REG_READ(hw, SXE_GOTCL) | + ((u64)SXE_REG_READ(hw, SXE_GOTCH) << 32)); +#ifdef SXE_DPDK + do { + gotch = SXE_REG_READ(hw, SXE_GOTCH); + rycle_cnt--; + } while (gotch != 0 && rycle_cnt != 0); + if (gotch != 0) { + LOG_INFO("GOTCH is not clear!\n"); + } +#endif + + return; +} + +void sxe_hw_stats_seq_clean(struct sxe_hw *hw, struct sxe_mac_stats *stats) +{ + u8 i; + u64 tx_pfc_num = 0; + u64 gotch = 0; + u32 rycle_cnt = 10; + + stats->total_gotc += (SXE_REG_READ(hw, SXE_GOTCL) | + ((u64)SXE_REG_READ(hw, SXE_GOTCH) << 32)); + stats->total_gptc += SXE_REG_READ(hw, SXE_GPTC); + do { + gotch = SXE_REG_READ(hw, SXE_GOTCH); + rycle_cnt--; + } while (gotch != 0 && rycle_cnt != 0); + if (gotch != 0) { + LOG_INFO("GOTCH is not clear!\n"); + } + + for (i = 0; i < 8; i++) { + stats->prcpf[i] += SXE_REG_READ(hw,SXE_PRCPF(i)); + tx_pfc_num = SXE_REG_READ(hw,SXE_PFCT(i)); + stats->pfct[i] += tx_pfc_num; + stats->total_tx_pause += tx_pfc_num; + } + + return; +} + +void sxe_hw_stats_get(struct sxe_hw *hw, struct sxe_mac_stats *stats) +{ + u64 rjc; + u32 i, rx_dbu_drop, ring_drop = 0; + u64 tpr = 0; +#ifdef SXE_DPDK + u32 rycle_cnt = 10; + u64 gorch, torh = 0; +#endif + + for (i = 0; i < 16; i++) { + stats->qptc[i] += SXE_REG_READ(hw, SXE_QPTC(i)); + stats->qprc[i] += SXE_REG_READ(hw, SXE_QPRC(i)); + ring_drop = SXE_REG_READ(hw, SXE_QPRDC(i)); + stats->qprdc[i] += ring_drop; + stats->hw_rx_no_dma_resources += ring_drop; + + stats->qbtc[i] += ((u64)SXE_REG_READ(hw, SXE_QBTC_H(i)) << 32); + SXE_RMB(); + stats->qbtc[i] += SXE_REG_READ(hw, SXE_QBTC_L(i)); + + stats->qbrc[i] += ((u64)SXE_REG_READ(hw, SXE_QBRC_H(i)) << 32); + SXE_RMB(); + stats->qbrc[i] += SXE_REG_READ(hw, SXE_QBRC_L(i)); + } + stats->rxdgbc += ((u64)SXE_REG_READ(hw, SXE_RXDGBCH) << 32) + + (SXE_REG_READ(hw, SXE_RXDGBCL)); + + stats->rxdgpc += SXE_REG_READ(hw, SXE_RXDGPC); + stats->txdgpc += SXE_REG_READ(hw, SXE_TXDGPC); + stats->txdgbc += (((u64)SXE_REG_READ(hw, SXE_TXDGBCH) << 32) + + SXE_REG_READ(hw, SXE_TXDGBCL)); + + stats->rxddpc += SXE_REG_READ(hw,SXE_RXDDGPC); + stats->rxddbc += ((u64)SXE_REG_READ(hw, SXE_RXDDGBCH) << 32) + + (SXE_REG_READ(hw,SXE_RXDDGBCL)); + + stats->rxlpbkpc += SXE_REG_READ(hw,SXE_RXLPBKGPC); + stats->rxlpbkbc += ((u64)SXE_REG_READ(hw, SXE_RXLPBKGBCH) << 32) + + (SXE_REG_READ(hw,SXE_RXLPBKGBCL)); + + stats->rxdlpbkpc += SXE_REG_READ(hw,SXE_RXDLPBKGPC); + stats->rxdlpbkbc += 
((u64)SXE_REG_READ(hw, SXE_RXDLPBKGBCH) << 32) + + (SXE_REG_READ(hw,SXE_RXDLPBKGBCL)); + stats->rxtpcing += SXE_REG_READ(hw,SXE_RXTPCIN); + stats->rxtpceng += SXE_REG_READ(hw,SXE_RXTPCOUT); + stats->prddc += SXE_REG_READ(hw,SXE_RXPRDDC); + stats->txswerr += SXE_REG_READ(hw, SXE_TXSWERR); + stats->txswitch += SXE_REG_READ(hw, SXE_TXSWITCH); + stats->txrepeat += SXE_REG_READ(hw, SXE_TXREPEAT); + stats->txdescerr += SXE_REG_READ(hw, SXE_TXDESCERR); + + for (i = 0; i < 8; i++) { + stats->dburxtcin[i] += SXE_REG_READ(hw, SXE_DBUDRTCICNT(i)); + stats->dburxtcout[i] += SXE_REG_READ(hw, SXE_DBUDRTCOCNT(i)); + stats->dburxgdreecnt[i] += SXE_REG_READ(hw, SXE_DBUDREECNT(i)); + rx_dbu_drop = SXE_REG_READ(hw, SXE_DBUDROFPCNT(i)); + stats->dburxdrofpcnt[i] += rx_dbu_drop; + stats->dbutxtcin[i] += SXE_REG_READ(hw,SXE_DBUDTTCICNT(i)); + stats->dbutxtcout[i] += SXE_REG_READ(hw,SXE_DBUDTTCOCNT(i)); + } + + stats->fnavadd += (SXE_REG_READ(hw, SXE_FNAVUSTAT) & 0xFFFF); + stats->fnavrmv += ((SXE_REG_READ(hw, SXE_FNAVUSTAT) >> 16) & 0xFFFF); + stats->fnavadderr += (SXE_REG_READ(hw, SXE_FNAVFSTAT) & 0xFFFF); + stats->fnavrmverr += ((SXE_REG_READ(hw, SXE_FNAVFSTAT) >> 16) & 0xFFFF); + stats->fnavmatch += SXE_REG_READ(hw, SXE_FNAVMATCH); + stats->fnavmiss += SXE_REG_READ(hw, SXE_FNAVMISS); + + sxe_hw_stats_seq_get(hw, stats); + + stats->crcerrs += SXE_REG_READ(hw, SXE_CRCERRS); + stats->errbc += SXE_REG_READ(hw, SXE_ERRBC); + stats->bprc += SXE_REG_READ(hw, SXE_BPRC); + stats->mprc += SXE_REG_READ(hw, SXE_MPRC); + stats->roc += SXE_REG_READ(hw, SXE_ROC); + stats->prc64 += SXE_REG_READ(hw, SXE_PRC64); + stats->prc127 += SXE_REG_READ(hw, SXE_PRC127); + stats->prc255 += SXE_REG_READ(hw, SXE_PRC255); + stats->prc511 += SXE_REG_READ(hw, SXE_PRC511); + stats->prc1023 += SXE_REG_READ(hw, SXE_PRC1023); + stats->prc1522 += SXE_REG_READ(hw, SXE_PRC1522); + stats->rlec += SXE_REG_READ(hw, SXE_RLEC); + stats->mptc += SXE_REG_READ(hw, SXE_MPTC); + stats->ruc += SXE_REG_READ(hw, SXE_RUC); + stats->rfc += SXE_REG_READ(hw, SXE_RFC); + + rjc = SXE_REG_READ(hw, SXE_RJC); + stats->rjc += rjc; + stats->roc += rjc; + + tpr = SXE_REG_READ(hw, SXE_TPR); + stats->tpr += tpr; + stats->tpt += SXE_REG_READ(hw, SXE_TPT); + stats->ptc64 += SXE_REG_READ(hw, SXE_PTC64); + stats->ptc127 += SXE_REG_READ(hw, SXE_PTC127); + stats->ptc255 += SXE_REG_READ(hw, SXE_PTC255); + stats->ptc511 += SXE_REG_READ(hw, SXE_PTC511); + stats->ptc1023 += SXE_REG_READ(hw, SXE_PTC1023); + stats->ptc1522 += SXE_REG_READ(hw, SXE_PTC1522); + stats->bptc += SXE_REG_READ(hw, SXE_BPTC); + + stats->gprc += SXE_REG_READ(hw, SXE_GPRC); + stats->gorc += (SXE_REG_READ(hw, SXE_GORCL) | + ((u64)SXE_REG_READ(hw, SXE_GORCH) << 32)); +#ifdef SXE_DPDK + do { + gorch = SXE_REG_READ(hw, SXE_GORCH); + rycle_cnt--; + } while (gorch != 0 && rycle_cnt != 0); + if (gorch != 0) { + LOG_INFO("GORCH is not clear!\n"); + } +#endif + + stats->tor += (SXE_REG_READ(hw, SXE_TORL) | + ((u64)SXE_REG_READ(hw, SXE_TORH) << 32)); +#ifdef SXE_DPDK + rycle_cnt = 10; + do { + torh = SXE_REG_READ(hw, SXE_TORH); + rycle_cnt--; + } while (torh != 0 && rycle_cnt != 0); + if (torh != 0) { + LOG_INFO("TORH is not clear!\n"); + } +#endif + +#ifdef SXE_DPDK + stats->tor -= tpr * RTE_ETHER_CRC_LEN; + stats->gptc = stats->total_gptc - stats->total_tx_pause; + stats->gotc = stats->total_gotc - stats->total_tx_pause * RTE_ETHER_MIN_LEN + - stats->gptc * RTE_ETHER_CRC_LEN; +#else + stats->gptc = stats->total_gptc; + stats->gotc = stats->total_gotc; +#endif + + return; +} + +static u32 sxe_hw_tx_packets_num_get(struct 
sxe_hw *hw) +{ + return SXE_REG_READ(hw, SXE_TXDGPC); +} + +static u32 sxe_hw_unsec_packets_num_get(struct sxe_hw *hw) +{ + return SXE_REG_READ(hw, SXE_SSVPC); +} + +static u32 sxe_hw_mac_stats_dump(struct sxe_hw *hw, u32 *regs_buff, u32 buf_size) +{ + u32 i; + u32 regs_num = buf_size / sizeof(u32); + + for (i = 0; i < regs_num; i++) { + regs_buff[i] = SXE_REG_READ(hw, mac_regs[i]); + } + + return i; +} + +static u32 sxe_hw_tx_dbu_to_mac_stats(struct sxe_hw *hw) +{ + return SXE_REG_READ(hw, SXE_DTMPCNT); +} + +static const struct sxe_stat_operations sxe_stat_ops = { + .stats_get = sxe_hw_stats_get, + .stats_clear = sxe_hw_stats_regs_clean, + .mac_stats_dump = sxe_hw_mac_stats_dump, + .tx_packets_num_get = sxe_hw_tx_packets_num_get, + .unsecurity_packets_num_get = sxe_hw_unsec_packets_num_get, + .tx_dbu_to_mac_stats = sxe_hw_tx_dbu_to_mac_stats, +}; + +void sxe_hw_mbx_init(struct sxe_hw *hw) +{ + hw->mbx.msg_len = SXE_MBX_MSG_NUM; + hw->mbx.interval = SXE_MBX_RETRY_INTERVAL; + hw->mbx.retry = SXE_MBX_RETRY_COUNT; + + hw->mbx.stats.rcv_msgs = 0; + hw->mbx.stats.send_msgs = 0; + hw->mbx.stats.acks = 0; + hw->mbx.stats.reqs = 0; + hw->mbx.stats.rsts = 0; + + return; +} + +static bool sxe_hw_vf_irq_check(struct sxe_hw *hw, u32 mask, u32 index) +{ + u32 value = SXE_REG_READ(hw, SXE_PFMBICR(index)); + + if (value & mask) { + SXE_REG_WRITE(hw, SXE_PFMBICR(index), mask); + return true; + } + + return false; +} + +bool sxe_hw_vf_rst_check(struct sxe_hw *hw, u8 vf_idx) +{ + u32 index = vf_idx >> 5; + u32 bit = vf_idx % 32; + u32 value; + + value = SXE_REG_READ(hw, SXE_VFLRE(index)); + if (value & BIT(bit)) { + SXE_REG_WRITE(hw, SXE_VFLREC(index), BIT(bit)); + hw->mbx.stats.rsts++; + return true; + } + + return false; +} + +bool sxe_hw_vf_req_check(struct sxe_hw *hw, u8 vf_idx) +{ + u8 index = vf_idx >> 4; + u8 bit = vf_idx % 16; + + if (sxe_hw_vf_irq_check(hw, SXE_PFMBICR_VFREQ << bit, index)) { + hw->mbx.stats.reqs++; + return true; + } + + return false; +} + +bool sxe_hw_vf_ack_check(struct sxe_hw *hw, u8 vf_idx) +{ + u8 index = vf_idx >> 4; + u8 bit = vf_idx % 16; + + if (sxe_hw_vf_irq_check(hw, SXE_PFMBICR_VFACK << bit, index)) { + hw->mbx.stats.acks++; + return true; + } + + return false; +} + +static bool sxe_hw_mbx_lock(struct sxe_hw *hw, u8 vf_idx) +{ + u32 value; + bool ret = false; + u32 retry = hw->mbx.retry; + + while (retry--) { + SXE_REG_WRITE(hw, SXE_PFMAILBOX(vf_idx), SXE_PFMAILBOX_PFU); + + value = SXE_REG_READ(hw, SXE_PFMAILBOX(vf_idx)); + if (value & SXE_PFMAILBOX_PFU) { + ret = true; + break; + } + + udelay(hw->mbx.interval); + } + + return ret; +} + +s32 sxe_hw_rcv_msg_from_vf(struct sxe_hw *hw, u32 *msg, + u16 msg_len, u16 index) +{ + struct sxe_mbx_info *mbx = &hw->mbx; + u8 i; + s32 ret = 0; + u16 msg_entry; + struct sxe_adapter *adapter = hw->adapter; + + msg_entry = (msg_len > mbx->msg_len) ? 
mbx->msg_len : msg_len; + + if (!sxe_hw_mbx_lock(hw, index)) { + ret = -SXE_ERR_MBX_LOCK_FAIL; + LOG_ERROR_BDF("vf idx:%d msg_len:%d rcv lock mailbox fail.(err:%d)\n", + index, msg_len, ret); + goto l_out; + } + + for (i = 0; i < msg_entry; i++) { + msg[i] = SXE_REG_READ(hw, (SXE_PFMBMEM(index) + (i << 2))); + LOG_DEBUG_BDF("vf_idx:%u read mbx mem[%u]:0x%x.\n", + index, i, msg[i]); + } + + SXE_REG_WRITE(hw, SXE_PFMAILBOX(index), SXE_PFMAILBOX_ACK); + mbx->stats.rcv_msgs++; + +l_out: + return ret; +} + +s32 sxe_hw_send_msg_to_vf(struct sxe_hw *hw, u32 *msg, + u16 msg_len, u16 index) +{ + struct sxe_mbx_info *mbx = &hw->mbx; + u8 i; + s32 ret = 0; + u32 old; + struct sxe_adapter *adapter = hw->adapter; + + if (msg_len > mbx->msg_len) { + ret = -EINVAL; + LOG_ERROR_BDF("pf reply msg num:%d exceed limit:%d reply fail.(err:%d)\n", + msg_len, mbx->msg_len, ret); + goto l_out; + } + + if (!sxe_hw_mbx_lock(hw, index)) { + ret = -SXE_ERR_MBX_LOCK_FAIL; + LOG_ERROR_BDF("send msg len:%u to vf idx:%u msg[0]:0x%x " + "lock mailbox fail.(err:%d)\n", + msg_len, index, msg[0], ret); + goto l_out; + } + + old = SXE_REG_READ(hw, (SXE_PFMBMEM(index))); + LOG_DEBUG_BDF("original send msg:0x%x. mbx mem[0]:0x%x\n", *msg, old); + if (msg[0] & SXE_CTRL_MSG_MASK) { + msg[0] |= (old & SXE_MSGID_MASK); + } else { + msg[0] |= (old & SXE_PFMSG_MASK); + } + + for (i = 0; i < msg_len; i++) { + SXE_REG_WRITE(hw, (SXE_PFMBMEM(index) + (i << 2)), msg[i]); + LOG_DEBUG_BDF("vf_idx:%u write mbx mem[%u]:0x%x.\n", + index, i, msg[i]); + } + + SXE_REG_WRITE(hw, SXE_PFMAILBOX(index), SXE_PFMAILBOX_STS); + mbx->stats.send_msgs++; + +l_out: + return ret; +} + +void sxe_hw_mbx_mem_clear(struct sxe_hw *hw, u8 vf_idx) +{ + u8 msg_idx; + struct sxe_adapter *adapter = hw->adapter; + for (msg_idx = 0; msg_idx < hw->mbx.msg_len; msg_idx++) { + SXE_REG_WRITE_ARRAY(hw, SXE_PFMBMEM(vf_idx), msg_idx, 0); + } + + SXE_WRITE_FLUSH(hw); + + LOG_INFO_BDF("vf_idx:%u clear mbx mem.\n", vf_idx); + return; +} + +static const struct sxe_mbx_operations sxe_mbx_ops = { + .init = sxe_hw_mbx_init, + + .req_check = sxe_hw_vf_req_check, + .ack_check = sxe_hw_vf_ack_check, + .rst_check = sxe_hw_vf_rst_check, + + .msg_send = sxe_hw_send_msg_to_vf, + .msg_rcv = sxe_hw_rcv_msg_from_vf, + + .mbx_mem_clear = sxe_hw_mbx_mem_clear, +}; + +void sxe_hw_pcie_vt_mode_set(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_GCR_EXT, value); + + return; +} + +static const struct sxe_pcie_operations sxe_pcie_ops = { + .vt_mode_set = sxe_hw_pcie_vt_mode_set, +}; + +s32 sxe_hw_hdc_lock_get(struct sxe_hw *hw, u32 trylock) +{ + u32 val; + u16 i; + s32 ret = 0; + struct sxe_adapter *adapter = hw->adapter; + + SXE_REG_WRITE(hw, SXE_HDC_SW_LK, SXE_HDC_RELEASE_SW_LK); + SXE_WRITE_FLUSH(hw); + + for (i = 0; i < trylock; i++) { + val = SXE_REG_READ(hw, SXE_HDC_SW_LK) & SXE_HDC_SW_LK_BIT; + if (!val) { + break; + } + + udelay(10); + } + + if (i >= trylock) { + LOG_ERROR_BDF("hdc is busy, reg: 0x%x\n", val); + ret = -SXE_ERR_HDC_LOCK_BUSY; + goto l_out; + } + + val = SXE_REG_READ(hw, SXE_HDC_PF_LK) & SXE_HDC_PF_LK_BIT; + if (!val) { + SXE_REG_WRITE(hw, SXE_HDC_SW_LK, SXE_HDC_RELEASE_SW_LK); + LOG_ERROR_BDF("get hdc lock fail, reg: 0x%x\n", val); + ret = -SXE_ERR_HDC_LOCK_BUSY; + goto l_out; + } + + hw->hdc.pf_lock_val = val; + LOG_DEBUG_BDF("hw[%p]'s port[%u] got pf lock\n", hw, val); + +l_out: + return ret; +} + +void sxe_hw_hdc_lock_release(struct sxe_hw *hw, u32 retry_cnt) +{ + struct sxe_adapter *adapter = hw->adapter; + + do { + SXE_REG_WRITE(hw, SXE_HDC_SW_LK, 
SXE_HDC_RELEASE_SW_LK); + udelay(1); + if (!(SXE_REG_READ(hw, SXE_HDC_PF_LK) & hw->hdc.pf_lock_val)) { + LOG_DEBUG_BDF("hw[%p]'s port[%u] release pf lock\n", hw, + hw->hdc.pf_lock_val); + hw->hdc.pf_lock_val = 0; + break; + } + } while((retry_cnt--) > 0); + + return; +} + +void sxe_hw_hdc_fw_ov_clear(struct sxe_hw *hw) +{ + SXE_REG_WRITE(hw, SXE_HDC_FW_OV, 0); +} + +bool sxe_hw_hdc_is_fw_over_set(struct sxe_hw *hw) +{ + bool fw_ov = false; + + if (SXE_REG_READ(hw, SXE_HDC_FW_OV) & SXE_HDC_FW_OV_BIT) { + fw_ov = true; + } + + return fw_ov; +} + +void sxe_hw_hdc_packet_send_done(struct sxe_hw *hw) +{ + SXE_REG_WRITE(hw, SXE_HDC_SW_OV, SXE_HDC_SW_OV_BIT); + SXE_WRITE_FLUSH(hw); + + return; +} + +void sxe_hw_hdc_packet_header_send(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_HDC_PACKET_HEAD0, value); + + return; +} + +void sxe_hw_hdc_packet_data_dword_send(struct sxe_hw *hw, + u16 dword_index, u32 value) +{ + SXE_WRITE_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, dword_index, value); + return; +} + +u32 sxe_hw_hdc_fw_ack_header_get(struct sxe_hw *hw) +{ + return SXE_REG_READ(hw, SXE_HDC_PACKET_HEAD0); +} + +u32 sxe_hw_hdc_packet_data_dword_rcv(struct sxe_hw *hw, + u16 dword_index) +{ + return SXE_READ_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, dword_index); +} + +u32 sxe_hw_hdc_fw_status_get(struct sxe_hw *hw) +{ + struct sxe_adapter *adapter = hw->adapter; + u32 status = SXE_REG_READ(hw, SXE_FW_STATUS_REG); + + LOG_DEBUG_BDF("fw status[0x%x]\n", status); + + return status; +} + +void sxe_hw_hdc_drv_status_set(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_DRV_STATUS_REG, value); + return; +} + +u32 sxe_hw_hdc_channel_state_get(struct sxe_hw *hw) +{ + struct sxe_adapter *adapter = hw->adapter; + + u32 state = SXE_REG_READ(hw, SXE_FW_HDC_STATE_REG); + + LOG_DEBUG_BDF("hdc channel state[0x%x]\n", state); + + return state; +} + +STATIC u32 sxe_hw_hdc_irq_event_get(struct sxe_hw *hw) +{ + u32 status = SXE_REG_READ(hw, SXE_HDC_MSI_STATUS_REG); + struct sxe_adapter *adapter = hw->adapter; + + LOG_DEBUG_BDF("msi status[0x%x]\n", status); + + return status; +} + +static void sxe_hw_hdc_irq_event_clear(struct sxe_hw *hw, u32 event) +{ + u32 status = SXE_REG_READ(hw, SXE_HDC_MSI_STATUS_REG); + struct sxe_adapter *adapter = hw->adapter; + + LOG_DEBUG_BDF("msi status[0x%x] and clear bit=[0x%x]\n", status, event); + + status &= ~event; + SXE_REG_WRITE(hw, SXE_HDC_MSI_STATUS_REG, status); + + return; +} + +static void sxe_hw_hdc_resource_clean(struct sxe_hw *hw) +{ + u16 i; + + SXE_REG_WRITE(hw, SXE_HDC_SW_LK, 0x0); + SXE_REG_WRITE(hw, SXE_HDC_PACKET_HEAD0, 0x0); + for (i = 0; i < SXE_HDC_DATA_LEN_MAX; i++) { + SXE_WRITE_REG_ARRAY_32(hw, SXE_HDC_PACKET_DATA0, i, 0x0); + } + + return; +} + +static const struct sxe_hdc_operations sxe_hdc_ops = { + .pf_lock_get = sxe_hw_hdc_lock_get, + .pf_lock_release = sxe_hw_hdc_lock_release, + .is_fw_over_set = sxe_hw_hdc_is_fw_over_set, + .fw_ack_header_rcv = sxe_hw_hdc_fw_ack_header_get, + .packet_send_done = sxe_hw_hdc_packet_send_done, + .packet_header_send = sxe_hw_hdc_packet_header_send, + .packet_data_dword_send = sxe_hw_hdc_packet_data_dword_send, + .packet_data_dword_rcv = sxe_hw_hdc_packet_data_dword_rcv, + .fw_status_get = sxe_hw_hdc_fw_status_get, + .drv_status_set = sxe_hw_hdc_drv_status_set, + .irq_event_get = sxe_hw_hdc_irq_event_get, + .irq_event_clear = sxe_hw_hdc_irq_event_clear, + .fw_ov_clear = sxe_hw_hdc_fw_ov_clear, + .channel_state_get = sxe_hw_hdc_channel_state_get, + .resource_clean = sxe_hw_hdc_resource_clean, +}; + +#ifdef 
SXE_PHY_CONFIGURE +#define SXE_MDIO_COMMAND_TIMEOUT 100 + +static s32 sxe_hw_phy_reg_write(struct sxe_hw *hw, s32 prtad, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 ret = 0; + u32 i, command; + struct sxe_adapter *adapter = hw->adapter; + + SXE_REG_WRITE(hw, SXE_MSCD, (u32)phy_data); + + command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT) | + (device_type << SXE_MSCA_DEV_TYPE_SHIFT) | + (prtad << SXE_MSCA_PHY_ADDR_SHIFT) | + (SXE_MSCA_ADDR_CYCLE | SXE_MSCA_MDI_CMD_ON_PROG)); + + SXE_REG_WRITE(hw, SXE_MSCA, command); + + for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = SXE_REG_READ(hw, SXE_MSCA); + if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) { + break; + } + } + + if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) { + LOG_DEV_ERR("phy write cmd didn't complete, " + "reg_addr=%u, device_type=%u\n", reg_addr, device_type); + ret = -SXE_ERR_MDIO_CMD_TIMEOUT; + goto l_end; + } + + command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT) | + (device_type << SXE_MSCA_DEV_TYPE_SHIFT) | + (prtad << SXE_MSCA_PHY_ADDR_SHIFT) | + (SXE_MSCA_WRITE | SXE_MSCA_MDI_CMD_ON_PROG)); + + SXE_REG_WRITE(hw, SXE_MSCA, command); + + for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = SXE_REG_READ(hw, SXE_MSCA); + if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) { + break; + } + } + + if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) { + LOG_DEV_ERR("phy write cmd didn't complete, " + "reg_addr=%u, device_type=%u\n", reg_addr, device_type); + ret = -SXE_ERR_MDIO_CMD_TIMEOUT; + } + +l_end: + return ret; +} + +static s32 sxe_hw_phy_reg_read(struct sxe_hw *hw, s32 prtad, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + s32 ret = 0; + u32 i, data, command; + struct sxe_adapter *adapter = hw->adapter; + + command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT) | + (device_type << SXE_MSCA_DEV_TYPE_SHIFT) | + (prtad << SXE_MSCA_PHY_ADDR_SHIFT) | + (SXE_MSCA_ADDR_CYCLE | SXE_MSCA_MDI_CMD_ON_PROG)); + + SXE_REG_WRITE(hw, SXE_MSCA, command); + + for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = SXE_REG_READ(hw, SXE_MSCA); + if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) { + break; + } + } + + if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) { + LOG_DEV_ERR("phy read cmd didn't complete, " + "reg_addr=%u, device_type=%u\n", reg_addr, device_type); + ret = -SXE_ERR_MDIO_CMD_TIMEOUT; + goto l_end; + } + + command = ((reg_addr << SXE_MSCA_NP_ADDR_SHIFT) | + (device_type << SXE_MSCA_DEV_TYPE_SHIFT) | + (prtad << SXE_MSCA_PHY_ADDR_SHIFT) | + (SXE_MSCA_READ | SXE_MSCA_MDI_CMD_ON_PROG)); + + SXE_REG_WRITE(hw, SXE_MSCA, command); + + for (i = 0; i < SXE_MDIO_COMMAND_TIMEOUT; i++) { + udelay(10); + + command = SXE_REG_READ(hw, SXE_MSCA); + if ((command & SXE_MSCA_MDI_CMD_ON_PROG) == 0) { + break; + } + } + + if ((command & SXE_MSCA_MDI_CMD_ON_PROG) != 0) { + LOG_DEV_ERR("phy read cmd didn't complete, " + "reg_addr=%u, device_type=%u\n", reg_addr, device_type); + ret = -SXE_ERR_MDIO_CMD_TIMEOUT; + goto l_end; + } + + data = SXE_REG_READ(hw, SXE_MSCD); + data >>= MDIO_MSCD_RDATA_SHIFT; + *phy_data = (u16)(data); + +l_end: + return ret; +} + +#define SXE_PHY_REVISION_MASK 0x000F +#define SXE_PHY_ID_HIGH_5_BIT_MASK 0xFC00 +#define SXE_PHY_ID_HIGH_SHIFT 10 + +static s32 sxe_hw_phy_id_get(struct sxe_hw *hw, u32 prtad, u32 *id) +{ + s32 ret; + u16 phy_id_high = 0; + u16 phy_id_low = 0; + + + ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_DEVID1, MDIO_MMD_PMAPMD, + &phy_id_low); + + if (ret) { + LOG_ERROR("get phy id upper 16 bits failed, prtad=%d\n", prtad); + goto l_end; + } + + ret =
sxe_hw_phy_reg_read(hw, prtad, MDIO_DEVID2, MDIO_MMD_PMAPMD, + &phy_id_high); + if (ret) { + LOG_ERROR("get phy id lower 4 bits failed, prtad=%d\n", prtad); + goto l_end; + } + + *id = (u32)((phy_id_high >> SXE_PHY_ID_HIGH_SHIFT) << 16); + *id |= (u32)phy_id_low; + +l_end: + return ret; +} + +s32 sxe_hw_phy_link_cap_get(struct sxe_hw *hw, u32 prtad, u32 *speed) +{ + s32 ret; + u16 speed_ability; + + ret = hw->phy.ops->reg_read(hw, prtad, MDIO_SPEED, MDIO_MMD_PMAPMD, + &speed_ability); + if (ret) { + *speed = 0; + LOG_ERROR("get phy link cap failed, ret=%d, prtad=%d\n", + ret, prtad); + goto l_end; + } + + if (speed_ability & MDIO_SPEED_10G) { + *speed |= SXE_LINK_SPEED_10GB_FULL; + } + + if (speed_ability & MDIO_PMA_SPEED_1000) { + *speed |= SXE_LINK_SPEED_1GB_FULL; + } + + if (speed_ability & MDIO_PMA_SPEED_100) { + *speed |= SXE_LINK_SPEED_100_FULL; + } + +l_end: + return ret; +} + +static s32 sxe_hw_phy_ctrl_reset(struct sxe_hw *hw, u32 prtad) +{ + u32 i; + s32 ret; + u16 ctrl; + + ret = sxe_hw_phy_reg_write(hw, prtad, MDIO_CTRL1, + MDIO_MMD_PHYXS, MDIO_CTRL1_RESET); + if (ret) { + LOG_ERROR("phy reset failed, ret=%d\n", ret); + goto l_end; + } + + for (i = 0; i < 30; i++) { + msleep(100); + ret = sxe_hw_phy_reg_read(hw, prtad, MDIO_CTRL1, + MDIO_MMD_PHYXS, &ctrl); + if (ret) { + goto l_end; + } + + if (!(ctrl & MDIO_CTRL1_RESET)) { + udelay(2); + break; + } + } + + if (ctrl & MDIO_CTRL1_RESET) { + LOG_DEV_ERR("phy reset polling failed to complete\n"); + return -SXE_ERR_PHY_RESET_FAIL; + } + +l_end: + return ret; +} + +static const struct sxe_phy_operations sxe_phy_hw_ops = { + .reg_write = sxe_hw_phy_reg_write, + .reg_read = sxe_hw_phy_reg_read, + .identifier_get = sxe_hw_phy_id_get, + .link_cap_get = sxe_hw_phy_link_cap_get, + .reset = sxe_hw_phy_ctrl_reset, +}; +#endif + +void sxe_hw_ops_init(struct sxe_hw *hw) +{ + hw->setup.ops = &sxe_setup_ops; + hw->irq.ops = &sxe_irq_ops; + hw->mac.ops = &sxe_mac_ops; + hw->dbu.ops = &sxe_dbu_ops; + hw->dma.ops = &sxe_dma_ops; + hw->sec.ops = &sxe_sec_ops; + hw->stat.ops = &sxe_stat_ops; + hw->mbx.ops = &sxe_mbx_ops; + hw->pcie.ops = &sxe_pcie_ops; + hw->hdc.ops = &sxe_hdc_ops; +#ifdef SXE_PHY_CONFIGURE + hw->phy.ops = &sxe_phy_hw_ops; +#endif + + hw->filter.mac.ops = &sxe_filter_mac_ops; + hw->filter.vlan.ops = &sxe_filter_vlan_ops; + return; +} + +u32 sxe_hw_rss_key_get_by_idx(struct sxe_hw *hw, u8 reg_idx) +{ + u32 rss_key; + + if (reg_idx >= SXE_MAX_RSS_KEY_ENTRIES) { + rss_key = 0; + } else { + rss_key = SXE_REG_READ(hw, SXE_RSSRK(reg_idx)); + } + + return rss_key; +} + +bool sxe_hw_is_rss_enabled(struct sxe_hw *hw) +{ + bool rss_enable = false; + u32 mrqc = SXE_REG_READ(hw, SXE_MRQC); + if (mrqc & SXE_MRQC_RSSEN) { + rss_enable = true; + } + + return rss_enable; +} + +static u32 sxe_hw_mrqc_reg_get(struct sxe_hw *hw) +{ + return SXE_REG_READ(hw, SXE_MRQC); +} + +u32 sxe_hw_rss_field_get(struct sxe_hw *hw) +{ + u32 mrqc = sxe_hw_mrqc_reg_get(hw); + return (mrqc & SXE_RSS_FIELD_MASK); +} + +#ifdef SXE_DPDK + +#define SXE_TRAFFIC_CLASS_MAX 8 + +#define SXE_MR_VLAN_MSB_REG_OFFSET 4 +#define SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET 4 + +#define SXE_MR_TYPE_MASK 0x0F +#define SXE_MR_DST_POOL_OFFSET 8 + +void sxe_hw_crc_strip_config(struct sxe_hw *hw, bool keep_crc) +{ + u32 crcflag = SXE_REG_READ(hw, SXE_CRC_STRIP_REG); + + if (keep_crc) { + crcflag |= SXE_KEEP_CRC_EN; + } else { + crcflag &= ~SXE_KEEP_CRC_EN; + } + + SXE_REG_WRITE(hw, SXE_CRC_STRIP_REG, crcflag); + return; +} + +void sxe_hw_rx_pkt_buf_size_set(struct sxe_hw *hw, u8 tc_idx, u16 
pbsize) +{ + u32 rxpbsize = pbsize << SXE_RX_PKT_BUF_SIZE_SHIFT; + + sxe_hw_rx_pkt_buf_switch(hw, false); + SXE_REG_WRITE(hw, SXE_RXPBSIZE(tc_idx), rxpbsize); + sxe_hw_rx_pkt_buf_switch(hw, true); + + return; +} + +void sxe_hw_dcb_vmdq_mq_configure(struct sxe_hw *hw, u8 num_pools) +{ + u16 pbsize; + u8 i, nb_tcs; + u32 mrqc; + + nb_tcs = SXE_VMDQ_DCB_NUM_QUEUES / num_pools; + + pbsize = (u8)(SXE_RX_PKT_BUF_SIZE / nb_tcs); + + for (i = 0; i < nb_tcs; i++) { + sxe_hw_rx_pkt_buf_size_set(hw, i, pbsize); + } + + for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + sxe_hw_rx_pkt_buf_size_set(hw, i, 0); + } + + mrqc = (num_pools == RTE_ETH_16_POOLS) ? + SXE_MRQC_VMDQRT8TCEN : SXE_MRQC_VMDQRT4TCEN; + SXE_REG_WRITE(hw, SXE_MRQC, mrqc); + + SXE_REG_WRITE(hw, SXE_RTRPCS, SXE_RTRPCS_RRM); + + return; +} + +static const struct sxe_reg_info sxe_regs_general_group[] = { + {SXE_CTRL, 1, 1, "SXE_CTRL"}, + {SXE_STATUS, 1, 1, "SXE_STATUS"}, + {SXE_CTRL_EXT, 1, 1, "SXE_CTRL_EXT"}, + {0, 0, 0, ""} +}; + +static const struct sxe_reg_info sxe_regs_interrupt_group[] = { + {SXE_EICS, 1, 1, "SXE_EICS"}, + {SXE_EIMS, 1, 1, "SXE_EIMS"}, + {SXE_EIMC, 1, 1, "SXE_EIMC"}, + {SXE_EIAC, 1, 1, "SXE_EIAC"}, + {SXE_EIAM, 1, 1, "SXE_EIAM"}, + {SXE_EITR(0), 24, 4, "SXE_EITR"}, + {SXE_IVAR(0), 24, 4, "SXE_IVAR"}, + {SXE_GPIE, 1, 1, "SXE_GPIE"}, + {0, 0, 0, ""} +}; + +static const struct sxe_reg_info sxe_regs_fctl_group[] = { + {SXE_PFCTOP, 1, 1, "SXE_PFCTOP"}, + {SXE_FCRTV, 1, 1, "SXE_FCRTV"}, + {SXE_TFCS, 1, 1, "SXE_TFCS"}, + {0, 0, 0, ""} +}; + +static const struct sxe_reg_info sxe_regs_rxdma_group[] = { + {SXE_RDBAL(0), 64, 0x40, "SXE_RDBAL"}, + {SXE_RDBAH(0), 64, 0x40, "SXE_RDBAH"}, + {SXE_RDLEN(0), 64, 0x40, "SXE_RDLEN"}, + {SXE_RDH(0), 64, 0x40, "SXE_RDH"}, + {SXE_RDT(0), 64, 0x40, "SXE_RDT"}, + {SXE_RXDCTL(0), 64, 0x40, "SXE_RXDCTL"}, + {SXE_SRRCTL(0), 16, 0x4, "SXE_SRRCTL"}, + {SXE_TPH_RXCTRL(0), 16, 4, "SXE_TPH_RXCTRL"}, + {SXE_RDRXCTL, 1, 1, "SXE_RDRXCTL"}, + {SXE_RXPBSIZE(0), 8, 4, "SXE_RXPBSIZE"}, + {SXE_RXCTRL, 1, 1, "SXE_RXCTRL"}, + {0, 0, 0, ""} +}; + +static const struct sxe_reg_info sxe_regs_rx_group[] = { + {SXE_RXCSUM, 1, 1, "SXE_RXCSUM"}, + {SXE_RFCTL, 1, 1, "SXE_RFCTL"}, + {SXE_RAL(0), 16, 8, "SXE_RAL"}, + {SXE_RAH(0), 16, 8, "SXE_RAH"}, + {SXE_PSRTYPE(0), 1, 4, "SXE_PSRTYPE"}, + {SXE_FCTRL, 1, 1, "SXE_FCTRL"}, + {SXE_VLNCTRL, 1, 1, "SXE_VLNCTRL"}, + {SXE_MCSTCTRL, 1, 1, "SXE_MCSTCTRL"}, + {SXE_MRQC, 1, 1, "SXE_MRQC"}, + {SXE_VMD_CTL, 1, 1, "SXE_VMD_CTL"}, + + {0, 0, 0, ""} +}; + +static struct sxe_reg_info sxe_regs_tx_group[] = { + {SXE_TDBAL(0), 32, 0x40, "SXE_TDBAL"}, + {SXE_TDBAH(0), 32, 0x40, "SXE_TDBAH"}, + {SXE_TDLEN(0), 32, 0x40, "SXE_TDLEN"}, + {SXE_TDH(0), 32, 0x40, "SXE_TDH"}, + {SXE_TDT(0), 32, 0x40, "SXE_TDT"}, + {SXE_TXDCTL(0), 32, 0x40, "SXE_TXDCTL"}, + {SXE_TPH_TXCTRL(0), 16, 4, "SXE_TPH_TXCTRL"}, + {SXE_TXPBSIZE(0), 8, 4, "SXE_TXPBSIZE"}, + {0, 0, 0, ""} +}; + +static const struct sxe_reg_info sxe_regs_wakeup_group[] = { + {SXE_WUC, 1, 1, "SXE_WUC"}, + {SXE_WUFC, 1, 1, "SXE_WUFC"}, + {SXE_WUS, 1, 1, "SXE_WUS"}, + {0, 0, 0, ""} +}; + +static const struct sxe_reg_info sxe_regs_dcb_group[] = { + {0, 0, 0, ""} +}; + +static const struct sxe_reg_info sxe_regs_diagnostic_group[] = { + + {SXE_MFLCN, 1, 1, "SXE_MFLCN"}, + {0, 0, 0, ""}, +}; + +static const struct sxe_reg_info *sxe_regs_group[] = { + sxe_regs_general_group, + sxe_regs_interrupt_group, + sxe_regs_fctl_group, + sxe_regs_rxdma_group, + sxe_regs_rx_group, + sxe_regs_tx_group, + sxe_regs_wakeup_group, + sxe_regs_dcb_group, + 
sxe_regs_diagnostic_group, + NULL}; + +static u32 sxe_regs_group_count(const struct sxe_reg_info *regs) +{ + int i = 0; + int count = 0; + + while (regs[i].count) { + count += regs[i++].count; + } + + return count; +}; + +static u32 sxe_hw_regs_group_read(struct sxe_hw *hw, + const struct sxe_reg_info *regs, + u32 *reg_buf) +{ + u32 j, i = 0; + int count = 0; + + while (regs[i].count) { + for (j = 0; j < regs[i].count; j++) { + reg_buf[count + j] = SXE_REG_READ(hw, + regs[i].addr + j * regs[i].stride); + LOG_INFO("regs= %s, regs_addr=%x, regs_value=%04x\n", + regs[i].name , regs[i].addr, reg_buf[count + j]); + } + + i++; + count += j; + } + + return count; +}; + +u32 sxe_hw_all_regs_group_num_get(void) +{ + u32 i = 0; + u32 count = 0; + const struct sxe_reg_info *reg_group; + const struct sxe_reg_info **reg_set = sxe_regs_group; + + while ((reg_group = reg_set[i++])) { + count += sxe_regs_group_count(reg_group); + } + + return count; +} + +void sxe_hw_all_regs_group_read(struct sxe_hw *hw, u32 *data) +{ + u32 count = 0, i = 0; + const struct sxe_reg_info *reg_group; + const struct sxe_reg_info **reg_set = sxe_regs_group; + + while ((reg_group = reg_set[i++])) { + count += sxe_hw_regs_group_read(hw, reg_group, &data[count]); + } + + LOG_INFO("read regs cnt=%u, regs num=%u\n", + count, sxe_hw_all_regs_group_num_get()); + + return; +} + +static void sxe_hw_default_pool_configure(struct sxe_hw *hw, + u8 default_pool_enabled, + u8 default_pool_idx) +{ + u32 vt_ctl; + + vt_ctl = SXE_VT_CTL_VT_ENABLE | SXE_VT_CTL_REPLEN; + if (default_pool_enabled) { + vt_ctl |= (default_pool_idx << SXE_VT_CTL_POOL_SHIFT); + } else { + vt_ctl |= SXE_VT_CTL_DIS_DEFPL; + } + + SXE_REG_WRITE(hw, SXE_VT_CTL, vt_ctl); + return; +} + +void sxe_hw_dcb_vmdq_default_pool_configure(struct sxe_hw *hw, + u8 default_pool_enabled, + u8 default_pool_idx) +{ + sxe_hw_default_pool_configure(hw, default_pool_enabled, default_pool_idx); + return; +} + +u32 sxe_hw_ring_irq_switch_get(struct sxe_hw *hw, u8 idx) +{ + u32 mask; + + if (idx == 0) { + mask = SXE_REG_READ(hw, SXE_EIMS_EX(0)); + } else { + mask = SXE_REG_READ(hw, SXE_EIMS_EX(1)); + } + + return mask; +} + +void sxe_hw_ring_irq_switch_set(struct sxe_hw *hw, u8 idx, u32 value) +{ + if (idx == 0) { + SXE_REG_WRITE(hw, SXE_EIMS_EX(0), value); + } else { + SXE_REG_WRITE(hw, SXE_EIMS_EX(1), value); + } + + return; +} + +void sxe_hw_dcb_vmdq_up_2_tc_configure(struct sxe_hw *hw, + u8 *tc_arr) +{ + u32 up2tc; + u8 i; + + up2tc = 0; + for (i = 0; i < MAX_USER_PRIORITY; i++) { + up2tc |= ((tc_arr[i] & 0x07) << (i * 3)); + } + + SXE_REG_WRITE(hw, SXE_RTRUP2TC, up2tc); + + return; +} + +u32 sxe_hw_uta_hash_table_get(struct sxe_hw *hw, u8 reg_idx) +{ + return SXE_REG_READ(hw, SXE_UTA(reg_idx)); +} + +void sxe_hw_uta_hash_table_set(struct sxe_hw *hw, + u8 reg_idx, u32 value) +{ + SXE_REG_WRITE(hw, SXE_UTA(reg_idx), value); + + return; +} + +u32 sxe_hw_vlan_type_get(struct sxe_hw *hw) +{ + return SXE_REG_READ(hw, SXE_VLNCTRL); +} + +void sxe_hw_vlan_type_set(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_VLNCTRL, value); + return; +} + +void sxe_hw_dcb_vmdq_vlan_configure(struct sxe_hw *hw, + u8 num_pools) +{ + u32 vlanctrl; + u8 i; + + vlanctrl = SXE_REG_READ(hw, SXE_VLNCTRL); + vlanctrl |= SXE_VLNCTRL_VFE; + SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl); + + for (i = 0; i < SXE_VFT_TBL_SIZE; i++) { + SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF); + } + + SXE_REG_WRITE(hw, SXE_VFRE(0), + num_pools == RTE_ETH_16_POOLS ? 
0xFFFF : 0xFFFFFFFF); + + SXE_REG_WRITE(hw, SXE_MPSAR_LOW(0), 0xFFFFFFFF); + SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(0), 0xFFFFFFFF); + + return; +} + +void sxe_hw_vlan_ext_type_set(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_EXVET, value); + return; +} + +u32 sxe_hw_txctl_vlan_type_get(struct sxe_hw *hw) +{ + return SXE_REG_READ(hw, SXE_DMATXCTL); +} + +void sxe_hw_txctl_vlan_type_set(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_DMATXCTL, value); + return; +} + +u32 sxe_hw_ext_vlan_get(struct sxe_hw *hw) +{ + return SXE_REG_READ(hw, SXE_CTRL_EXT); +} + +void sxe_hw_ext_vlan_set(struct sxe_hw *hw, u32 value) +{ + SXE_REG_WRITE(hw, SXE_CTRL_EXT, value); + return; +} + +void sxe_hw_rxq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value) +{ + SXE_REG_WRITE(hw, SXE_RQSMR(idx), value); + return; +} + +void sxe_hw_dcb_vmdq_pool_configure(struct sxe_hw *hw, + u8 pool_idx, u16 vlan_id, + u64 pools_map) +{ + SXE_REG_WRITE(hw, SXE_VLVF(pool_idx), (SXE_VLVF_VIEN | + (vlan_id & 0xFFF))); + + SXE_REG_WRITE(hw, SXE_VLVFB(pool_idx * 2), pools_map); + + return; +} + +void sxe_hw_txq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value) +{ + SXE_REG_WRITE(hw, SXE_TQSM(idx), value); + return; +} + +void sxe_hw_dcb_rx_configure(struct sxe_hw *hw, bool is_vt_on, + u8 sriov_active, u8 tc_num) +{ + u32 reg; + u32 vlanctrl; + u8 i; + u32 q; + + reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC | SXE_RTRPCS_ARBDIS; + SXE_REG_WRITE(hw, SXE_RTRPCS, reg); + + reg = SXE_REG_READ(hw, SXE_MRQC); + if (tc_num == 4) { + if (is_vt_on) { + reg = (reg & ~SXE_MRQC_MRQE_MASK) | + SXE_MRQC_VMDQRT4TCEN; + } else { + SXE_REG_WRITE(hw, SXE_VT_CTL, 0); + reg = (reg & ~SXE_MRQC_MRQE_MASK) | + SXE_MRQC_RTRSS4TCEN; + } + } + + if (tc_num == 8) { + if (is_vt_on) { + reg = (reg & ~SXE_MRQC_MRQE_MASK) | + SXE_MRQC_VMDQRT8TCEN; + } else { + SXE_REG_WRITE(hw, SXE_VT_CTL, 0); + reg = (reg & ~SXE_MRQC_MRQE_MASK) | + SXE_MRQC_RTRSS8TCEN; + } + } + + SXE_REG_WRITE(hw, SXE_MRQC, reg); + + if (sriov_active == 0) { + for (q = 0; q < SXE_HW_TXRX_RING_NUM_MAX; q++) { + SXE_REG_WRITE(hw, SXE_QDE, + (SXE_QDE_WRITE | + (q << SXE_QDE_IDX_SHIFT))); + } + } else { + for (q = 0; q < SXE_HW_TXRX_RING_NUM_MAX; q++) { + SXE_REG_WRITE(hw, SXE_QDE, + (SXE_QDE_WRITE | + (q << SXE_QDE_IDX_SHIFT) | + SXE_QDE_ENABLE)); + } + } + + vlanctrl = SXE_REG_READ(hw, SXE_VLNCTRL); + vlanctrl |= SXE_VLNCTRL_VFE; + SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl); + + for (i = 0; i < SXE_VFT_TBL_SIZE; i++) { + SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF); + } + + reg = SXE_RTRPCS_RRM | SXE_RTRPCS_RAC; + SXE_REG_WRITE(hw, SXE_RTRPCS, reg); + + return; +} + +void sxe_hw_fc_status_get(struct sxe_hw *hw, + bool *rx_pause_on, bool *tx_pause_on) +{ + u32 flctrl; + + flctrl = SXE_REG_READ(hw, SXE_FLCTRL); + if (flctrl & (SXE_FCTRL_RFCE_PFC_EN | SXE_FCTRL_RFCE_LFC_EN)) { + *rx_pause_on = true; + } else { + *rx_pause_on = false; + } + + if (flctrl & (SXE_FCTRL_TFCE_PFC_EN | SXE_FCTRL_TFCE_LFC_EN)) { + *tx_pause_on = true; + } else { + *tx_pause_on = false; + } + + return; +} + +void sxe_hw_fc_base_init(struct sxe_hw *hw) +{ + u8 i; + + hw->fc.requested_mode = SXE_FC_NONE; + hw->fc.current_mode = SXE_FC_NONE; + hw->fc.pause_time = SXE_DEFAULT_FCPAUSE; + hw->fc.disable_fc_autoneg = false; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + hw->fc.low_water[i] = SXE_FC_DEFAULT_LOW_WATER_MARK; + hw->fc.high_water[i] = SXE_FC_DEFAULT_HIGH_WATER_MARK; + } + + hw->fc.send_xon = 1; + return; +} + +u32 sxe_hw_fc_tc_high_water_mark_get(struct sxe_hw *hw, u8 tc_idx) +{ + return hw->fc.high_water[tc_idx]; 
+} + +u32 sxe_hw_fc_tc_low_water_mark_get(struct sxe_hw *hw, u8 tc_idx) +{ + return hw->fc.low_water[tc_idx]; +} + +u16 sxe_hw_fc_send_xon_get(struct sxe_hw *hw) +{ + return hw->fc.send_xon; +} + +void sxe_hw_fc_send_xon_set(struct sxe_hw *hw, u16 send_xon) +{ + hw->fc.send_xon = send_xon; + return; +} + +u16 sxe_hw_fc_pause_time_get(struct sxe_hw *hw) +{ + return hw->fc.pause_time; +} + +void sxe_hw_fc_pause_time_set(struct sxe_hw *hw, u16 pause_time) +{ + hw->fc.pause_time = pause_time; + return; +} + +void sxe_hw_dcb_tx_configure(struct sxe_hw *hw, bool is_vt_on, u8 tc_num) +{ + u32 reg; + + reg = SXE_REG_READ(hw, SXE_RTTDCS); + reg |= SXE_RTTDCS_ARBDIS; + SXE_REG_WRITE(hw, SXE_RTTDCS, reg); + + if (tc_num == 8) { + reg = SXE_MTQC_RT_ENA | SXE_MTQC_8TC_8TQ; + } else { + reg = SXE_MTQC_RT_ENA | SXE_MTQC_4TC_4TQ; + } + + if (is_vt_on) { + reg |= SXE_MTQC_VT_ENA; + } + + SXE_REG_WRITE(hw, SXE_MTQC, reg); + + reg = SXE_REG_READ(hw, SXE_RTTDCS); + reg &= ~SXE_RTTDCS_ARBDIS; + SXE_REG_WRITE(hw, SXE_RTTDCS, reg); + + + return; +} + +void sxe_hw_rx_ip_checksum_offload_switch(struct sxe_hw *hw, + bool is_on) +{ + u32 rxcsum; + + rxcsum = SXE_REG_READ(hw, SXE_RXCSUM); + if (is_on) { + rxcsum |= SXE_RXCSUM_IPPCSE; + } else { + rxcsum &= ~SXE_RXCSUM_IPPCSE; + } + + SXE_REG_WRITE(hw, SXE_RXCSUM, rxcsum); + + return; +} + +void sxe_hw_rss_cap_switch(struct sxe_hw *hw, bool is_on) +{ + u32 mrqc = SXE_REG_READ(hw, SXE_MRQC); + if (is_on) { + mrqc |= SXE_MRQC_RSSEN; + } else { + mrqc &= ~SXE_MRQC_RSSEN; + } + + SXE_REG_WRITE(hw, SXE_MRQC, mrqc); + + return; +} + +void sxe_hw_pool_xmit_enable(struct sxe_hw *hw, u16 reg_idx, u8 pool_num) +{ + SXE_REG_WRITE(hw, SXE_VFTE(reg_idx), + pool_num == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF); + return; +} + +void sxe_hw_rss_field_set(struct sxe_hw *hw, u32 rss_field) +{ + u32 mrqc = SXE_REG_READ(hw, SXE_MRQC); + + mrqc &= ~SXE_RSS_FIELD_MASK; + mrqc |= rss_field; + SXE_REG_WRITE(hw, SXE_MRQC, mrqc); + + return; +} + +static void sxe_hw_dcb_4tc_vmdq_off_stats_configure(struct sxe_hw *hw) +{ + u32 reg; + u8 i; + + for (i = 0; i < 32; i++) { + if (i % 8 > 3) { + continue; + } + + reg = 0x01010101 * (i / 8); + SXE_REG_WRITE(hw, SXE_RQSMR(i), reg); + } + for (i = 0; i < 32; i++) { + if (i < 16) { + reg = 0x00000000; + } else if (i < 24) { + reg = 0x01010101; + } else if (i < 28) { + reg = 0x02020202; + } else { + reg = 0x03030303; + } + + SXE_REG_WRITE(hw, SXE_TQSM(i), reg); + } + + return; +} + +static void sxe_hw_dcb_4tc_vmdq_on_stats_configure(struct sxe_hw *hw) +{ + u8 i; + + for (i = 0; i < 32; i++) { + SXE_REG_WRITE(hw, SXE_RQSMR(i), 0x03020100); + } + + + for (i = 0; i < 32; i++) { + SXE_REG_WRITE(hw, SXE_TQSM(i), 0x03020100); + } + + return; +} + +void sxe_hw_rss_redir_tbl_set_by_idx(struct sxe_hw *hw, + u16 reg_idx, u32 value) +{ + return sxe_hw_rss_redir_tbl_reg_write(hw, reg_idx, value); +} + +static u32 sxe_hw_rss_redir_tbl_reg_read(struct sxe_hw *hw, u16 reg_idx) +{ + return SXE_REG_READ(hw, SXE_RETA(reg_idx >> 2)); +} + +u32 sxe_hw_rss_redir_tbl_get_by_idx(struct sxe_hw *hw, u16 reg_idx) +{ + return sxe_hw_rss_redir_tbl_reg_read(hw, reg_idx); +} + +void sxe_hw_ptp_time_inc_stop(struct sxe_hw *hw) +{ + SXE_REG_WRITE(hw, SXE_TIMINC, 0); + return; +} + +void sxe_hw_dcb_tc_stats_configure(struct sxe_hw *hw, + u8 tc_num, bool vmdq_active) +{ + if (tc_num == 8 && vmdq_active == false) { + sxe_hw_dcb_8tc_vmdq_off_stats_configure(hw); + } else if (tc_num == 4 && vmdq_active == false) { + sxe_hw_dcb_4tc_vmdq_off_stats_configure(hw); + } else if (tc_num == 4 && 
vmdq_active == true) { + sxe_hw_dcb_4tc_vmdq_on_stats_configure(hw); + } + + return; +} + +void sxe_hw_ptp_timestamp_disable(struct sxe_hw *hw) +{ + SXE_REG_WRITE(hw, SXE_TSYNCTXCTL, + (SXE_REG_READ(hw, SXE_TSYNCTXCTL) & + ~SXE_TSYNCTXCTL_TEN)); + + SXE_REG_WRITE(hw, SXE_TSYNCRXCTL, + (SXE_REG_READ(hw, SXE_TSYNCRXCTL) & + ~SXE_TSYNCRXCTL_REN)); + SXE_WRITE_FLUSH(hw); + + return; +} + +void sxe_hw_mac_pool_clear(struct sxe_hw *hw, u8 rar_idx) +{ + struct sxe_adapter *adapter = hw->adapter; + + if (rar_idx > SXE_UC_ENTRY_NUM_MAX) { + LOG_ERROR_BDF("rar_idx:%d invalid.(err:%d)\n", + rar_idx, SXE_ERR_PARAM); + goto l_end; + } + + SXE_REG_WRITE(hw, SXE_MPSAR_LOW(rar_idx), 0); + SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(rar_idx), 0); + +l_end: + return; +} + +void sxe_hw_vmdq_mq_configure(struct sxe_hw *hw) +{ + u32 mrqc; + + mrqc = SXE_MRQC_VMDQEN; + SXE_REG_WRITE(hw, SXE_MRQC, mrqc); + + return; +} + +void sxe_hw_vmdq_default_pool_configure(struct sxe_hw *hw, + u8 default_pool_enabled, + u8 default_pool_idx) +{ + sxe_hw_default_pool_configure(hw, default_pool_enabled, default_pool_idx); + return; +} + +void sxe_hw_vmdq_vlan_configure(struct sxe_hw *hw, + u8 num_pools, u32 rx_mode) +{ + u32 vlanctrl; + u8 i; + + vlanctrl = SXE_REG_READ(hw, SXE_VLNCTRL); + vlanctrl |= SXE_VLNCTRL_VFE; + SXE_REG_WRITE(hw, SXE_VLNCTRL, vlanctrl); + + for (i = 0; i < SXE_VFT_TBL_SIZE; i++) { + SXE_REG_WRITE(hw, SXE_VFTA(i), 0xFFFFFFFF); + } + + SXE_REG_WRITE(hw, SXE_VFRE(0), 0xFFFFFFFF); + if (num_pools == RTE_ETH_64_POOLS) { + SXE_REG_WRITE(hw, SXE_VFRE(1), 0xFFFFFFFF); + } + + for (i = 0; i < num_pools; i++) { + SXE_REG_WRITE(hw, SXE_VMOLR(i), rx_mode); + } + + SXE_REG_WRITE(hw, SXE_MPSAR_LOW(0), 0xFFFFFFFF); + SXE_REG_WRITE(hw, SXE_MPSAR_HIGH(0), 0xFFFFFFFF); + + SXE_WRITE_FLUSH(hw); + return; +} + +u32 sxe_hw_pcie_vt_mode_get(struct sxe_hw *hw) +{ + + return SXE_REG_READ(hw, SXE_GCR_EXT); +} + +void sxe_rx_fc_threshold_set(struct sxe_hw *hw) +{ + u8 i; + u32 high; + + for (i = 0; i < SXE_TRAFFIC_CLASS_MAX; i++) { + SXE_REG_WRITE(hw, SXE_FCRTL(i), 0); + high = SXE_REG_READ(hw, SXE_RXPBSIZE(i)) - 32; + SXE_REG_WRITE(hw, SXE_FCRTH(i), high); + } + + return; +} + +void sxe_hw_vmdq_pool_configure(struct sxe_hw *hw, + u8 pool_idx, u16 vlan_id, + u64 pools_map) +{ + SXE_REG_WRITE(hw, SXE_VLVF(pool_idx), (SXE_VLVF_VIEN | + (vlan_id & SXE_RXD_VLAN_ID_MASK))); + + if (((pools_map >> 32) & 0xFFFFFFFF) == 0) { + SXE_REG_WRITE(hw, SXE_VLVFB(pool_idx * 2), + (pools_map & 0xFFFFFFFF)); + } else { + SXE_REG_WRITE(hw, SXE_VLVFB((pool_idx * 2 + 1)), + ((pools_map >> 32) & 0xFFFFFFFF)); + } + + SXE_WRITE_FLUSH(hw); + return; +} + +void sxe_hw_vmdq_loopback_configure(struct sxe_hw *hw) +{ + u8 i; + SXE_REG_WRITE(hw, SXE_PFDTXGSWC, SXE_PFDTXGSWC_VT_LBEN); + for (i = 0; i < SXE_VMTXSW_REGISTER_COUNT; i++) { + SXE_REG_WRITE(hw, SXE_VMTXSW(i), 0xFFFFFFFF); + } + + SXE_WRITE_FLUSH(hw); + return; +} + +void sxe_hw_tx_multi_queue_configure(struct sxe_hw *hw, + bool vmdq_enable, bool sriov_enable, u16 pools_num) +{ + u32 mtqc; + + sxe_hw_dcb_arbiter_set(hw, false); + + if (sriov_enable) { + switch (pools_num) { + case RTE_ETH_64_POOLS: + mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_64VF; + break; + case RTE_ETH_32_POOLS: + mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_32VF; + break; + case RTE_ETH_16_POOLS: + mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_RT_ENA | + SXE_MTQC_8TC_8TQ; + break; + default: + mtqc = SXE_MTQC_64Q_1PB; + } + } else { + if (vmdq_enable) { + u8 queue_idx; + SXE_REG_WRITE(hw, SXE_VFTE(0), UINT32_MAX); + SXE_REG_WRITE(hw, SXE_VFTE(1), UINT32_MAX); + + for 
(queue_idx = 0; queue_idx < SXE_HW_TXRX_RING_NUM_MAX; + queue_idx++) { + SXE_REG_WRITE(hw, SXE_QDE, + (SXE_QDE_WRITE | + (queue_idx << SXE_QDE_IDX_SHIFT))); + } + + mtqc = SXE_MTQC_VT_ENA | SXE_MTQC_64VF; + } else { + mtqc = SXE_MTQC_64Q_1PB; + } + } + + SXE_REG_WRITE(hw, SXE_MTQC, mtqc); + + sxe_hw_dcb_arbiter_set(hw, true); + + return; +} + +void sxe_hw_vf_queue_drop_enable(struct sxe_hw *hw, u8 vf_idx, + u8 ring_per_pool) +{ + u32 value; + u8 i; + + for (i = (vf_idx * ring_per_pool); i < ((vf_idx + 1) * ring_per_pool); i++) + { + value = SXE_QDE_ENABLE | SXE_QDE_WRITE; + SXE_WRITE_FLUSH(hw); + + value |= i << SXE_QDE_IDX_SHIFT; + + SXE_REG_WRITE(hw, SXE_QDE, value); + } + + return; +} + +bool sxe_hw_vt_status(struct sxe_hw *hw) +{ + bool ret; + u32 vt_ctl = SXE_REG_READ(hw, SXE_VT_CTL); + + if (vt_ctl & SXE_VMD_CTL_POOL_EN) { + ret = true; + } else { + ret = false; + } + + return ret; +} + +void sxe_hw_mirror_ctl_set(struct sxe_hw *hw, u8 rule_id, + u8 mirror_type, u8 dst_pool, bool on) +{ + u32 mr_ctl; + + mr_ctl = SXE_REG_READ(hw, SXE_MRCTL(rule_id)); + + if (on) { + mr_ctl |= mirror_type; + mr_ctl &= SXE_MR_TYPE_MASK; + mr_ctl |= dst_pool << SXE_MR_DST_POOL_OFFSET; + } else { + mr_ctl &= ~(mirror_type & SXE_MR_TYPE_MASK); + } + + SXE_REG_WRITE(hw, SXE_MRCTL(rule_id), mr_ctl); + + return; +} + +void sxe_hw_mirror_virtual_pool_set(struct sxe_hw *hw, u8 rule_id,u32 lsb, u32 msb) +{ + SXE_REG_WRITE(hw, SXE_VMRVM(rule_id), lsb); + SXE_REG_WRITE(hw, SXE_VMRVM(rule_id + SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET), msb); + + return; +} + +void sxe_hw_mirror_vlan_set(struct sxe_hw *hw, u8 rule_id,u32 lsb, u32 msb) +{ + SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id), lsb); + SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id + SXE_MR_VLAN_MSB_REG_OFFSET), msb); + + return; +} + +void sxe_hw_mirror_rule_clear(struct sxe_hw *hw, u8 rule_id) +{ + SXE_REG_WRITE(hw, SXE_MRCTL(rule_id), 0); + + SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id), 0); + SXE_REG_WRITE(hw, SXE_VMRVLAN(rule_id + SXE_MR_VLAN_MSB_REG_OFFSET), 0); + + SXE_REG_WRITE(hw, SXE_VMRVM(rule_id), 0); + SXE_REG_WRITE(hw, SXE_VMRVM(rule_id + SXE_MR_VIRTUAL_POOL_MSB_REG_OFFSET), 0); + + return; +} + +#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL +void sxe_hw_fivetuple_filter_add(struct rte_eth_dev *dev, + struct sxe_fivetuple_node_info *filter) +{ + struct sxe_adapter *adapter = dev->data->dev_private; + struct sxe_hw *hw = &adapter->hw; + u16 i; + u32 ftqf, sdpqf; + u32 l34timir = 0; + u8 mask = 0xff; + + i = filter->index; + + sdpqf = (u32)(filter->filter_info.dst_port << SXE_SDPQF_DSTPORT_SHIFT); + sdpqf = sdpqf | (filter->filter_info.src_port & SXE_SDPQF_SRCPORT); + + ftqf = (u32)(filter->filter_info.protocol & SXE_FTQF_PROTOCOL_MASK); + ftqf |= (u32)((filter->filter_info.priority & + SXE_FTQF_PRIORITY_MASK) << SXE_FTQF_PRIORITY_SHIFT); + + if (filter->filter_info.src_ip_mask == 0) { + mask &= SXE_FTQF_SOURCE_ADDR_MASK; + } + if (filter->filter_info.dst_ip_mask == 0) { + mask &= SXE_FTQF_DEST_ADDR_MASK; + } + if (filter->filter_info.src_port_mask == 0) { + mask &= SXE_FTQF_SOURCE_PORT_MASK; + } + if (filter->filter_info.dst_port_mask == 0) { + mask &= SXE_FTQF_DEST_PORT_MASK; + } + if (filter->filter_info.proto_mask == 0) { + mask &= SXE_FTQF_PROTOCOL_COMP_MASK; + } + ftqf |= mask << SXE_FTQF_5TUPLE_MASK_SHIFT; + ftqf |= SXE_FTQF_POOL_MASK_EN; + ftqf |= SXE_FTQF_QUEUE_ENABLE; + + LOG_DEBUG("add fivetuple filter, index[%u], src_ip[0x%x], dst_ip[0x%x]" + "src_port[%u], dst_port[%u], ftqf[0x%x], queue[%u]", i, filter->filter_info.src_ip, + 
filter->filter_info.dst_ip, filter->filter_info.src_port, filter->filter_info.dst_port, + ftqf, filter->queue); + + SXE_REG_WRITE(hw, SXE_DAQF(i), filter->filter_info.dst_ip); + SXE_REG_WRITE(hw, SXE_SAQF(i), filter->filter_info.src_ip); + SXE_REG_WRITE(hw, SXE_SDPQF(i), sdpqf); + SXE_REG_WRITE(hw, SXE_FTQF(i), ftqf); + + l34timir |= SXE_L34T_IMIR_RESERVE; + l34timir |= (u32)(filter->queue << SXE_L34T_IMIR_QUEUE_SHIFT); + SXE_REG_WRITE(hw, SXE_L34T_IMIR(i), l34timir); + + return; +} + +void sxe_hw_fivetuple_filter_del(struct sxe_hw *hw, u16 reg_index) +{ + SXE_REG_WRITE(hw, SXE_DAQF(reg_index), 0); + SXE_REG_WRITE(hw, SXE_SAQF(reg_index), 0); + SXE_REG_WRITE(hw, SXE_SDPQF(reg_index), 0); + SXE_REG_WRITE(hw, SXE_FTQF(reg_index), 0); + SXE_REG_WRITE(hw, SXE_L34T_IMIR(reg_index), 0); + + return; +} + +void sxe_hw_ethertype_filter_add(struct sxe_hw *hw, + u8 reg_index, u16 ethertype, u16 queue) +{ + u32 etqf = 0; + u32 etqs = 0; + + etqf = SXE_ETQF_FILTER_EN; + etqf |= (u32)ethertype; + etqs |= (u32)((queue << SXE_ETQS_RX_QUEUE_SHIFT) & + SXE_ETQS_RX_QUEUE); + etqs |= SXE_ETQS_QUEUE_EN; + + SXE_REG_WRITE(hw, SXE_ETQF(reg_index), etqf); + SXE_REG_WRITE(hw, SXE_ETQS(reg_index), etqs); + SXE_WRITE_FLUSH(hw); + + return; +} + +void sxe_hw_ethertype_filter_del(struct sxe_hw *hw, u8 filter_type) +{ + SXE_REG_WRITE(hw, SXE_ETQF(filter_type), 0); + SXE_REG_WRITE(hw, SXE_ETQS(filter_type), 0); + SXE_WRITE_FLUSH(hw); + + return; +} + +void sxe_hw_syn_filter_add(struct sxe_hw *hw, u16 queue, u8 priority) +{ + u32 synqf; + + synqf = (u32)(((queue << SXE_SYN_FILTER_QUEUE_SHIFT) & + SXE_SYN_FILTER_QUEUE) | SXE_SYN_FILTER_ENABLE); + + if (priority) { + synqf |= SXE_SYN_FILTER_SYNQFP; + } else { + synqf &= ~SXE_SYN_FILTER_SYNQFP; + } + + SXE_REG_WRITE(hw, SXE_SYNQF, synqf); + SXE_WRITE_FLUSH(hw); + + return; +} + +void sxe_hw_syn_filter_del(struct sxe_hw *hw) +{ + u32 synqf; + + synqf = SXE_REG_READ(hw, SXE_SYNQF); + + synqf &= ~(SXE_SYN_FILTER_QUEUE | SXE_SYN_FILTER_ENABLE); + SXE_REG_WRITE(hw, SXE_SYNQF, synqf); + SXE_WRITE_FLUSH(hw); + + return; +} + +void sxe_hw_fnav_rx_pkt_buf_size_reset(struct sxe_hw *hw, u32 pbsize) +{ + S32 i; + + SXE_REG_WRITE(hw, SXE_RXPBSIZE(0), (SXE_REG_READ(hw, SXE_RXPBSIZE(0)) - pbsize)); + for (i = 1; i < 8; i++) { + SXE_REG_WRITE(hw, SXE_RXPBSIZE(i), 0); + } + + return; +} + +void sxe_hw_fnav_flex_mask_set(struct sxe_hw *hw, u16 flex_mask) +{ + u32 fnavm; + + fnavm = SXE_REG_READ(hw, SXE_FNAVM); + if (flex_mask == UINT16_MAX) { + fnavm &= ~SXE_FNAVM_FLEX; + } + + SXE_REG_WRITE(hw, SXE_FNAVM, fnavm); + return; +} + +void sxe_hw_fnav_ipv6_mask_set(struct sxe_hw *hw, u16 src_mask, u16 dst_mask) +{ + u32 fnavipv6m; + + fnavipv6m = (dst_mask << 16) | src_mask; + SXE_REG_WRITE(hw, SXE_FNAVIP6M, ~fnavipv6m); + + return; +} + +s32 sxe_hw_fnav_flex_offset_set(struct sxe_hw *hw, u16 offset) +{ + u32 fnavctrl; + s32 ret; + + fnavctrl = SXE_REG_READ(hw, SXE_FNAVCTRL); + fnavctrl &= ~SXE_FNAVCTRL_FLEX_MASK; + fnavctrl |= ((offset >> 1) + << SXE_FNAVCTRL_FLEX_SHIFT); + + SXE_REG_WRITE(hw, SXE_FNAVCTRL, fnavctrl); + SXE_WRITE_FLUSH(hw); + + ret = sxe_hw_fnav_wait_init_done(hw); + if (ret) { + LOG_ERROR("flow director signature poll time exceeded!\n"); + } + + return ret; +} +#endif + +#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_MACSEC +static void sxe_macsec_stop_data(struct sxe_hw *hw, bool link) +{ + u32 t_rdy, r_rdy; + u32 limit; + u32 reg; + + reg = SXE_REG_READ(hw, SXE_SECTXCTRL); + reg |= SXE_SECTXCTRL_TX_DIS; + SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg); + + reg = SXE_REG_READ(hw, 
SXE_SECRXCTRL); + reg |= SXE_SECRXCTRL_RX_DIS; + SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg); + SXE_WRITE_FLUSH(hw); + + t_rdy = SXE_REG_READ(hw, SXE_SECTXSTAT) & + SXE_SECTXSTAT_SECTX_RDY; + r_rdy = SXE_REG_READ(hw, SXE_SECRXSTAT) & + SXE_SECRXSTAT_SECRX_RDY; + if (t_rdy && r_rdy) + return; + + if (!link) { + SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0x1); + + SXE_WRITE_FLUSH(hw); + mdelay(3); + } + + limit = 20; + do { + mdelay(10); + t_rdy = SXE_REG_READ(hw, SXE_SECTXSTAT) & + SXE_SECTXSTAT_SECTX_RDY; + r_rdy = SXE_REG_READ(hw, SXE_SECRXSTAT) & + SXE_SECRXSTAT_SECRX_RDY; + } while (!(t_rdy && r_rdy) && limit--); + + if (!link) { + SXE_REG_WRITE(hw, SXE_LPBKCTRL, 0x0); + SXE_WRITE_FLUSH(hw); + } + + return; +} +void sxe_hw_rx_queue_mode_set(struct sxe_hw *hw, u32 mrqc) +{ + SXE_REG_WRITE(hw, SXE_MRQC, mrqc); + + return; +} + +void sxe_hw_macsec_enable(struct sxe_hw *hw, bool is_up, u32 tx_mode, + u32 rx_mode, u32 pn_trh) +{ + u32 reg; + + sxe_macsec_stop_data(hw, is_up); + + reg = SXE_REG_READ(hw, SXE_SECTXCTRL); + reg &= ~SXE_SECTXCTRL_SECTX_DIS; + reg &= ~SXE_SECTXCTRL_STORE_FORWARD; + SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg); + + SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250); + + reg = SXE_REG_READ(hw, SXE_SECTXMINIFG); + reg = (reg & 0xfffffff0) | 0x3; + SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg); + + reg = SXE_REG_READ(hw, SXE_SECRXCTRL); + reg &= ~SXE_SECRXCTRL_SECRX_DIS; + reg |= SXE_SECRXCTRL_RP; + SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg); + + reg = tx_mode & SXE_LSECTXCTRL_EN_MASK; + reg |= SXE_LSECTXCTRL_AISCI; + reg &= ~SXE_LSECTXCTRL_PNTHRSH_MASK; + reg |= (pn_trh << SXE_LSECTXCTRL_PNTHRSH_SHIFT); + SXE_REG_WRITE(hw, SXE_LSECTXCTRL, reg); + + reg = (rx_mode << SXE_LSECRXCTRL_EN_SHIFT) & SXE_LSECRXCTRL_EN_MASK; + reg |= SXE_LSECRXCTRL_RP; + reg |= SXE_LSECRXCTRL_DROP_EN; + SXE_REG_WRITE(hw, SXE_LSECRXCTRL, reg); + + reg = SXE_REG_READ(hw, SXE_SECTXCTRL); + reg &= ~SXE_SECTXCTRL_TX_DIS; + SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg); + + reg = SXE_REG_READ(hw, SXE_SECRXCTRL); + reg &= ~SXE_SECRXCTRL_RX_DIS; + SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg); + + SXE_WRITE_FLUSH(hw); + + return; +} + +void sxe_hw_macsec_disable(struct sxe_hw *hw, bool is_up) +{ + u32 reg; + + sxe_macsec_stop_data(hw, is_up); + + reg = SXE_REG_READ(hw, SXE_SECTXCTRL); + reg |= SXE_SECTXCTRL_SECTX_DIS; + reg &= ~SXE_SECTXCTRL_STORE_FORWARD; + SXE_REG_WRITE(hw, SXE_SECTXCTRL, reg); + + reg = SXE_REG_READ(hw, SXE_SECRXCTRL); + reg |= SXE_SECRXCTRL_SECRX_DIS; + SXE_REG_WRITE(hw, SXE_SECRXCTRL, reg); + + SXE_REG_WRITE(hw, SXE_SECTXBUFFAF, 0x250); + + reg = SXE_REG_READ(hw, SXE_SECTXMINIFG); + reg = (reg & 0xfffffff0) | 0x1; + SXE_REG_WRITE(hw, SXE_SECTXMINIFG, reg); + + SXE_REG_WRITE(hw, SXE_SECTXCTRL, SXE_SECTXCTRL_SECTX_DIS); + SXE_REG_WRITE(hw, SXE_SECRXCTRL, SXE_SECRXCTRL_SECRX_DIS); + + SXE_WRITE_FLUSH(hw); + return; +} + +void sxe_hw_macsec_txsc_set(struct sxe_hw *hw, u32 scl, u32 sch) +{ + SXE_REG_WRITE(hw, SXE_LSECTXSCL, scl); + SXE_REG_WRITE(hw, SXE_LSECTXSCH, sch); + + SXE_WRITE_FLUSH(hw); + return; +} + +void sxe_hw_macsec_rxsc_set(struct sxe_hw *hw, u32 scl, u32 sch, u16 pi) +{ + u32 reg = sch; + + SXE_REG_WRITE(hw, SXE_LSECRXSCL, scl); + + reg |= (pi << SXE_LSECRXSCH_PI_SHIFT) & SXE_LSECRXSCH_PI_MASK; + SXE_REG_WRITE(hw, SXE_LSECRXSCH, reg); + + SXE_WRITE_FLUSH(hw); + return; + +} + +void sxe_hw_macsec_tx_sa_configure(struct sxe_hw *hw, u8 sa_idx, + u8 an, u32 pn, u32 *keys) +{ + u32 reg; + u8 i; + + reg = SXE_REG_READ(hw, SXE_LSECTXSA); + reg &= ~SXE_LSECTXSA_SELSA; + reg |= (sa_idx << SXE_LSECTXSA_SELSA_SHIFT) & SXE_LSECTXSA_SELSA; 
+ SXE_REG_WRITE(hw, SXE_LSECTXSA, reg); + SXE_WRITE_FLUSH(hw); + + SXE_REG_WRITE(hw, SXE_LSECTXPN(sa_idx), pn); + for (i = 0; i < 4; i++) { + SXE_REG_WRITE(hw, SXE_LSECTXKEY(sa_idx, i), keys[i]); + } + SXE_WRITE_FLUSH(hw); + + reg = SXE_REG_READ(hw, SXE_LSECTXSA); + if (sa_idx == 0) { + reg &= ~SXE_LSECTXSA_AN0_MASK; + reg |= (an << SXE_LSECTXSA_AN0_SHIFT) & SXE_LSECTXSA_AN0_MASK; + reg &= ~SXE_LSECTXSA_SELSA; + SXE_REG_WRITE(hw, SXE_LSECTXSA, reg); + } else if (sa_idx == 1) { + reg &= ~SXE_LSECTXSA_AN1_MASK; + reg |= (an << SXE_LSECTXSA_AN1_SHIFT) & SXE_LSECTXSA_AN1_MASK; + reg |= SXE_LSECTXSA_SELSA; + SXE_REG_WRITE(hw, SXE_LSECTXSA, reg); + } + + SXE_WRITE_FLUSH(hw); + return; +} + +void sxe_hw_macsec_rx_sa_configure(struct sxe_hw *hw, u8 sa_idx, + u8 an, u32 pn, u32 *keys) +{ + u32 reg; + u8 i; + + reg = SXE_REG_READ(hw, SXE_LSECRXSA(sa_idx)); + reg &= ~SXE_LSECRXSA_SAV; + reg |= (0 << SXE_LSECRXSA_SAV_SHIFT) & SXE_LSECRXSA_SAV; + + SXE_REG_WRITE(hw, SXE_LSECRXSA(sa_idx), reg); + + SXE_WRITE_FLUSH(hw); + + SXE_REG_WRITE(hw, SXE_LSECRXPN(sa_idx), pn); + + for (i = 0; i < 4; i++) { + SXE_REG_WRITE(hw, SXE_LSECRXKEY(sa_idx, i), keys[i]); + } + SXE_WRITE_FLUSH(hw); + + reg = ((an << SXE_LSECRXSA_AN_SHIFT) & SXE_LSECRXSA_AN_MASK) | SXE_LSECRXSA_SAV; + SXE_REG_WRITE(hw, SXE_LSECRXSA(sa_idx), reg); + SXE_WRITE_FLUSH(hw); + return; +} + +#endif +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_hw.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_hw.h new file mode 100644 index 000000000000..0f2ca4f3668d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_hw.h @@ -0,0 +1,1504 @@ + +#ifndef __SXE_HW_H__ +#define __SXE_HW_H__ + +#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST) +#include +#include +#else +#include "sxe_types.h" +#include "sxe_compat_platform.h" +#include "sxe_compat_version.h" +#ifdef SXE_HOST_DRIVER +#include "sxe_drv_type.h" +#endif +#include +#endif + +#include "sxe_regs.h" + +#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST) +#define SXE_PRIU64 "llu" +#define SXE_PRIX64 "llx" +#define SXE_PRID64 "lld" +#define SXE_RMB() rmb() + +#else +#define SXE_PRIU64 PRIu64 +#define SXE_PRIX64 PRIx64 +#define SXE_PRID64 PRId64 +#define SXE_RMB() rte_rmb() +#endif + +struct sxe_hw; +struct sxe_filter_mac; +struct sxe_fc_info; + +#define SXE_MAC_ADDR_LEN 6 +#define SXE_QUEUE_STATS_MAP_REG_NUM 32 + +#define SXE_FC_DEFAULT_HIGH_WATER_MARK 0x80 +#define SXE_FC_DEFAULT_LOW_WATER_MARK 0x40 + +#define SXE_MC_ADDR_EXTRACT_MASK (0xFFF) +#define SXE_MC_ADDR_SHIFT (5) +#define SXE_MC_ADDR_REG_MASK (0x7F) +#define SXE_MC_ADDR_BIT_MASK (0x1F) + +#define SXE_TXTS_POLL_CHECK 3 +#define SXE_TXTS_POLL 5 +#define SXE_TIME_TO_NS(ns, sec) (((u64)(ns)) + (u64)(((u64)(sec)) * NSEC_PER_SEC)) + +enum sxe_strict_prio_type { + PRIO_NONE = 0, + PRIO_GROUP, + PRIO_LINK +}; + +enum sxe_mc_filter_type { + SXE_MC_FILTER_TYPE0 = 0, + SXE_MC_FILTER_TYPE1, + SXE_MC_FILTER_TYPE2, + SXE_MC_FILTER_TYPE3 +}; + +#define SXE_POOLS_NUM_MAX 64 +#define SXE_16_POOL 16 +#define SXE_32_POOL 32 +#define SXE_1_RING_PER_POOL 1 +#define SXE_2_RING_PER_POOL 2 +#define SXE_3_RING_PER_POOL 3 +#define SXE_4_RING_PER_POOL 4 + +#define SXE_DCB_1_TC 1 +#define SXE_DCB_4_TC 4 +#define SXE_DCB_8_TC 8 + +#define SXE_8Q_PER_POOL_MASK 0x78 +#define SXE_4Q_PER_POOL_MASK 0x7C +#define SXE_2Q_PER_POOL_MASK 0x7E + +#define SXE_VF_NUM_16 16 +#define SXE_VF_NUM_32 32 + +#define SXE_TX_DESC_EOP_MASK 0x01000000 +#define SXE_TX_DESC_RS_MASK 0x08000000 +#define SXE_TX_DESC_STAT_DD 0x00000001 +#define SXE_TX_DESC_CMD 
(SXE_TX_DESC_EOP_MASK | SXE_TX_DESC_RS_MASK) +#define SXE_TX_DESC_TYPE_DATA 0x00300000 +#define SXE_TX_DESC_DEXT 0x20000000 +#define SXE_TX_DESC_IFCS 0x02000000 +#define SXE_TX_DESC_VLE 0x40000000 +#define SXE_TX_DESC_TSTAMP 0x00080000 +#define SXE_TX_DESC_FLAGS (SXE_TX_DESC_TYPE_DATA | \ + SXE_TX_DESC_IFCS | \ + SXE_TX_DESC_DEXT| \ + SXE_TX_DESC_EOP_MASK) +#define SXE_TXD_DTYP_CTXT 0x00200000 +#define SXE_TXD_DCMD_TSE 0x80000000 +#define SXE_TXD_MAC_LINKSEC 0x00040000 +#define SXE_TXD_MAC_1588 0x00080000 +#define SXE_TX_DESC_PAYLEN_SHIFT 14 +#define SXE_TX_OUTERIPCS_SHIFT 17 + +#define SXE_TX_POPTS_IXSM 0x01 +#define SXE_TX_POPTS_TXSM 0x02 +#define SXE_TXD_POPTS_SHIFT 8 +#define SXE_TXD_POPTS_IXSM (SXE_TX_POPTS_IXSM << SXE_TXD_POPTS_SHIFT) +#define SXE_TXD_POPTS_TXSM (SXE_TX_POPTS_TXSM << SXE_TXD_POPTS_SHIFT) +#define SXE_TXD_POPTS_IPSEC (0x00000400) + +#define SXE_TX_CTXTD_DTYP_CTXT 0x00200000 +#define SXE_TX_CTXTD_TUCMD_IPV6 0x00000000 +#define SXE_TX_CTXTD_TUCMD_IPV4 0x00000400 +#define SXE_TX_CTXTD_TUCMD_L4T_UDP 0x00000000 +#define SXE_TX_CTXTD_TUCMD_L4T_TCP 0x00000800 +#define SXE_TX_CTXTD_TUCMD_L4T_SCTP 0x00001000 +#define SXE_TX_CTXTD_TUCMD_L4T_RSV 0x00001800 +#define SXE_TX_CTXTD_TUCMD_IPSEC_TYPE_ESP 0x00002000 +#define SXE_TX_CTXTD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 + +#define SXE_TX_CTXTD_L4LEN_SHIFT 8 +#define SXE_TX_CTXTD_MSS_SHIFT 16 +#define SXE_TX_CTXTD_MACLEN_SHIFT 9 +#define SXE_TX_CTXTD_VLAN_SHIFT 16 +#define SXE_TX_CTXTD_VLAN_MASK 0xffff0000 +#define SXE_TX_CTXTD_MACLEN_MASK 0x0000fE00 +#define SXE_TX_CTXTD_OUTER_IPLEN_SHIFT 16 +#define SXE_TX_CTXTD_TUNNEL_LEN_SHIFT 24 + +#define SXE_VLAN_TAG_SIZE 4 + +#define SXE_RSS_KEY_SIZE (40) +#define SXE_MAX_RSS_KEY_ENTRIES (10) +#define SXE_MAX_RETA_ENTRIES (128) + +#define SXE_TIMINC_IV_NS_SHIFT 8 +#define SXE_TIMINC_INCPD_SHIFT 24 +#define SXE_TIMINC_SET(incpd, iv_ns, iv_sns) \ + (((incpd) << SXE_TIMINC_INCPD_SHIFT) | \ + ((iv_ns) << SXE_TIMINC_IV_NS_SHIFT) | (iv_sns)) + +#define PBA_STRATEGY_EQUAL (0) +#define PBA_STRATEGY_WEIGHTED (1) +#define SXE_PKG_BUF_NUM_MAX (8) +#define SXE_HW_TXRX_RING_NUM_MAX 128 +#define SXE_VMDQ_DCB_NUM_QUEUES SXE_HW_TXRX_RING_NUM_MAX +#define SXE_RX_PKT_BUF_SIZE (512) + +#define SXE_UC_ENTRY_NUM_MAX 128 +#define SXE_HW_TX_NONE_MODE_Q_NUM 64 + +#define SXE_MBX_MSG_NUM 16 +#define SXE_MBX_RETRY_INTERVAL 500 +#define SXE_MBX_RETRY_COUNT 2000 + +#define SXE_VF_UC_ENTRY_NUM_MAX 10 +#define SXE_VF_MC_ENTRY_NUM_MAX 30 + +#define SXE_UTA_ENTRY_NUM_MAX 128 +#define SXE_MTA_ENTRY_NUM_MAX 128 +#define SXE_HASH_UC_NUM_MAX 4096 + +#define SXE_MAC_ADDR_EXTRACT_MASK (0xFFF) +#define SXE_MAC_ADDR_SHIFT (5) +#define SXE_MAC_ADDR_REG_MASK (0x7F) +#define SXE_MAC_ADDR_BIT_MASK (0x1F) + +#define SXE_VFT_TBL_SIZE (128) +#define SXE_VLAN_ID_SHIFT (5) +#define SXE_VLAN_ID_REG_MASK (0x7F) +#define SXE_VLAN_ID_BIT_MASK (0x1F) + +#define SXE_TX_PBSIZE_MAX 0x00028000 +#define SXE_TX_PKT_SIZE_MAX 0xA +#define SXE_NODCB_TX_PKT_SIZE_MAX 0x14 +#define SXE_RING_ENABLE_WAIT_LOOP 10 + +#define VFTA_BLOCK_SIZE 8 +#define VF_BLOCK_BITS (32) +#define SXE_MAX_MAC_HDR_LEN 127 +#define SXE_MAX_NETWORK_HDR_LEN 511 +#define SXE_MAC_ADDR_LEN 6 + +#define SXE_FNAV_BUCKET_HASH_KEY 0x3DAD14E2 +#define SXE_FNAV_SAMPLE_HASH_KEY 0x174D3614 +#define SXE_SAMPLE_COMMON_HASH_KEY \ + (SXE_FNAV_BUCKET_HASH_KEY & SXE_FNAV_SAMPLE_HASH_KEY) + +#define SXE_SAMPLE_HASH_MASK 0x7fff +#define SXE_SAMPLE_L4TYPE_MASK 0x3 +#define SXE_SAMPLE_L4TYPE_UDP 0x1 +#define SXE_SAMPLE_L4TYPE_TCP 0x2 +#define SXE_SAMPLE_L4TYPE_SCTP 0x3 +#define SXE_SAMPLE_L4TYPE_IPV6_MASK 
0x4 +#define SXE_SAMPLE_L4TYPE_TUNNEL_MASK 0x10 +#define SXE_SAMPLE_FLOW_TYPE_MASK 0xF + +#define SXE_SAMPLE_VM_POOL_MASK 0x7F +#define SXE_SAMPLE_VLAN_MASK 0xEFFF +#define SXE_SAMPLE_FLEX_BYTES_MASK 0xFFFF + +#define SXE_FNAV_INIT_DONE_POLL 10 +#define SXE_FNAV_DROP_QUEUE 127 + +#define MAX_TRAFFIC_CLASS 8 +#define DEF_TRAFFIC_CLASS 1 + +#define SXE_LINK_SPEED_UNKNOWN 0 +#define SXE_LINK_SPEED_10_FULL 0x0002 +#define SXE_LINK_SPEED_100_FULL 0x0008 +#define SXE_LINK_SPEED_1GB_FULL 0x0020 +#define SXE_LINK_SPEED_10GB_FULL 0x0080 + +typedef u32 sxe_link_speed; +#ifdef SXE_TEST +#define SXE_LINK_MBPS_SPEED_DEFAULT 1000 +#else +#define SXE_LINK_MBPS_SPEED_DEFAULT 10000 +#endif + +#define SXE_LINK_MBPS_SPEED_MIN (10) + +enum sxe_rss_ip_version { + SXE_RSS_IP_VER_4 = 4, + SXE_RSS_IP_VER_6 = 6, +}; + +enum sxe_fnav_mode { + SXE_FNAV_SAMPLE_MODE = 1, + SXE_FNAV_SPECIFIC_MODE = 2, +}; + +enum sxe_sample_type { + SXE_SAMPLE_FLOW_TYPE_IPV4 = 0x0, + SXE_SAMPLE_FLOW_TYPE_UDPV4 = 0x1, + SXE_SAMPLE_FLOW_TYPE_TCPV4 = 0x2, + SXE_SAMPLE_FLOW_TYPE_SCTPV4 = 0x3, + SXE_SAMPLE_FLOW_TYPE_IPV6 = 0x4, + SXE_SAMPLE_FLOW_TYPE_UDPV6 = 0x5, + SXE_SAMPLE_FLOW_TYPE_TCPV6 = 0x6, + SXE_SAMPLE_FLOW_TYPE_SCTPV6 = 0x7, +}; + +enum { + SXE_DIAG_TEST_PASSED = 0, + SXE_DIAG_TEST_BLOCKED = 1, + SXE_DIAG_STATS_REG_TEST_ERR = 2, + SXE_DIAG_REG_PATTERN_TEST_ERR = 3, + SXE_DIAG_CHECK_REG_TEST_ERR = 4, + SXE_DIAG_DISABLE_IRQ_TEST_ERR = 5, + SXE_DIAG_ENABLE_IRQ_TEST_ERR = 6, + SXE_DIAG_DISABLE_OTHER_IRQ_TEST_ERR = 7, + SXE_DIAG_TX_RING_CONFIGURE_ERR = 8, + SXE_DIAG_RX_RING_CONFIGURE_ERR = 9, + SXE_DIAG_ALLOC_SKB_ERR = 10, + SXE_DIAG_LOOPBACK_SEND_TEST_ERR = 11, + SXE_DIAG_LOOPBACK_RECV_TEST_ERR = 12, +}; + +#define SXE_RXD_STAT_DD 0x01 +#define SXE_RXD_STAT_EOP 0x02 +#define SXE_RXD_STAT_FLM 0x04 +#define SXE_RXD_STAT_VP 0x08 +#define SXE_RXDADV_NEXTP_MASK 0x000FFFF0 +#define SXE_RXDADV_NEXTP_SHIFT 0x00000004 +#define SXE_RXD_STAT_UDPCS 0x10 +#define SXE_RXD_STAT_L4CS 0x20 +#define SXE_RXD_STAT_IPCS 0x40 +#define SXE_RXD_STAT_PIF 0x80 +#define SXE_RXD_STAT_CRCV 0x100 +#define SXE_RXD_STAT_OUTERIPCS 0x100 +#define SXE_RXD_STAT_VEXT 0x200 +#define SXE_RXD_STAT_UDPV 0x400 +#define SXE_RXD_STAT_DYNINT 0x800 +#define SXE_RXD_STAT_LLINT 0x800 +#define SXE_RXD_STAT_TSIP 0x08000 +#define SXE_RXD_STAT_TS 0x10000 +#define SXE_RXD_STAT_SECP 0x20000 +#define SXE_RXD_STAT_LB 0x40000 +#define SXE_RXD_STAT_ACK 0x8000 +#define SXE_RXD_ERR_CE 0x01 +#define SXE_RXD_ERR_LE 0x02 +#define SXE_RXD_ERR_PE 0x08 +#define SXE_RXD_ERR_OSE 0x10 +#define SXE_RXD_ERR_USE 0x20 +#define SXE_RXD_ERR_TCPE 0x40 +#define SXE_RXD_ERR_IPE 0x80 +#define SXE_RXDADV_ERR_MASK 0xfff00000 +#define SXE_RXDADV_ERR_SHIFT 20 +#define SXE_RXDADV_ERR_OUTERIPER 0x04000000 +#define SXE_RXDADV_ERR_FCEOFE 0x80000000 +#define SXE_RXDADV_ERR_FCERR 0x00700000 +#define SXE_RXDADV_ERR_FNAV_LEN 0x00100000 +#define SXE_RXDADV_ERR_FNAV_DROP 0x00200000 +#define SXE_RXDADV_ERR_FNAV_COLL 0x00400000 +#define SXE_RXDADV_ERR_HBO 0x00800000 +#define SXE_RXDADV_ERR_CE 0x01000000 +#define SXE_RXDADV_ERR_LE 0x02000000 +#define SXE_RXDADV_ERR_PE 0x08000000 +#define SXE_RXDADV_ERR_OSE 0x10000000 +#define SXE_RXDADV_ERR_IPSEC_INV_PROTOCOL 0x08000000 +#define SXE_RXDADV_ERR_IPSEC_INV_LENGTH 0x10000000 +#define SXE_RXDADV_ERR_IPSEC_AUTH_FAILED 0x18000000 +#define SXE_RXDADV_ERR_USE 0x20000000 +#define SXE_RXDADV_ERR_L4E 0x40000000 +#define SXE_RXDADV_ERR_IPE 0x80000000 +#define SXE_RXD_VLAN_ID_MASK 0x0FFF +#define SXE_RXD_PRI_MASK 0xE000 +#define SXE_RXD_PRI_SHIFT 13 +#define SXE_RXD_CFI_MASK 0x1000 +#define 
SXE_RXD_CFI_SHIFT 12 +#define SXE_RXDADV_LROCNT_MASK 0x001E0000 +#define SXE_RXDADV_LROCNT_SHIFT 17 + +#define SXE_RXDADV_STAT_DD SXE_RXD_STAT_DD +#define SXE_RXDADV_STAT_EOP SXE_RXD_STAT_EOP +#define SXE_RXDADV_STAT_FLM SXE_RXD_STAT_FLM +#define SXE_RXDADV_STAT_VP SXE_RXD_STAT_VP +#define SXE_RXDADV_STAT_MASK 0x000fffff +#define SXE_RXDADV_STAT_TS 0x00010000 +#define SXE_RXDADV_STAT_SECP 0x00020000 + +#define SXE_RXDADV_PKTTYPE_NONE 0x00000000 +#define SXE_RXDADV_PKTTYPE_IPV4 0x00000010 +#define SXE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 +#define SXE_RXDADV_PKTTYPE_IPV6 0x00000040 +#define SXE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 +#define SXE_RXDADV_PKTTYPE_TCP 0x00000100 +#define SXE_RXDADV_PKTTYPE_UDP 0x00000200 +#define SXE_RXDADV_PKTTYPE_SCTP 0x00000400 +#define SXE_RXDADV_PKTTYPE_NFS 0x00000800 +#define SXE_RXDADV_PKTTYPE_VXLAN 0x00000800 +#define SXE_RXDADV_PKTTYPE_TUNNEL 0x00010000 +#define SXE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 +#define SXE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 +#define SXE_RXDADV_PKTTYPE_LINKSEC 0x00004000 +#define SXE_RXDADV_PKTTYPE_ETQF 0x00008000 +#define SXE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 +#define SXE_RXDADV_PKTTYPE_ETQF_SHIFT 4 + +struct sxe_mac_stats { + u64 crcerrs; + u64 errbc; + u64 rlec; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 dburxtcin[8]; + u64 dburxtcout[8]; + u64 dburxgdreecnt[8]; + u64 dburxdrofpcnt[8]; + u64 dbutxtcin[8]; + u64 dbutxtcout[8]; + u64 rxdgpc; + u64 rxdgbc; + u64 rxddpc; + u64 rxddbc; + u64 rxtpcing; + u64 rxtpceng; + u64 rxlpbkpc; + u64 rxlpbkbc; + u64 rxdlpbkpc; + u64 rxdlpbkbc; + u64 prddc; + u64 txdgpc; + u64 txdgbc; + u64 txswerr; + u64 txswitch; + u64 txrepeat; + u64 txdescerr; + + u64 fnavadd; + u64 fnavrmv; + u64 fnavadderr; + u64 fnavrmverr; + u64 fnavmatch; + u64 fnavmiss; + u64 hw_rx_no_dma_resources; + u64 prcpf[8]; + u64 pfct[8]; + u64 mpc[8]; + + u64 total_tx_pause; + u64 total_gptc; + u64 total_gotc; +}; + +#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL +enum sxe_fivetuple_protocol { + SXE_FILTER_PROTOCOL_TCP = 0, + SXE_FILTER_PROTOCOL_UDP, + SXE_FILTER_PROTOCOL_SCTP, + SXE_FILTER_PROTOCOL_NONE, +}; + +struct sxe_fivetuple_filter_info { + u32 src_ip; + u32 dst_ip; + u16 src_port; + u16 dst_port; + enum sxe_fivetuple_protocol protocol; + u8 priority; + u8 src_ip_mask:1, + dst_ip_mask:1, + src_port_mask:1, + dst_port_mask:1, + proto_mask:1; +}; + +struct sxe_fivetuple_node_info { + u16 index; + u16 queue; + struct sxe_fivetuple_filter_info filter_info; +}; +#endif + +union sxe_fnav_rule_info { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } ntuple; + __be32 fast_access[11]; +}; + +union sxe_sample_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + +void sxe_hw_ops_init(struct sxe_hw *hw); + + +struct sxe_reg_info { + u32 addr; + u32 count; + u32 stride; + const s8 *name; +}; + +struct sxe_setup_operations { + s32 (*reset)(struct sxe_hw *); + void 
(*pf_rst_done_set)(struct sxe_hw *); + void (*no_snoop_disable)(struct sxe_hw *); + u32 (*reg_read)(struct sxe_hw *, u32); + void (*reg_write)(struct sxe_hw *, u32, u32); + void (*regs_dump)(struct sxe_hw *); + void (*regs_flush)(struct sxe_hw *); + s32 (*regs_test)(struct sxe_hw *); +}; + +struct sxe_hw_setup { + const struct sxe_setup_operations *ops; +}; + +struct sxe_irq_operations { + u32 (*pending_irq_read_clear)(struct sxe_hw *hw); + void (*pending_irq_write_clear)(struct sxe_hw * hw, u32 value); + void (*irq_general_reg_set)(struct sxe_hw *hw, u32 value); + u32 (*irq_general_reg_get)(struct sxe_hw *hw); + void (*ring_irq_auto_disable)(struct sxe_hw *hw, bool is_misx); + void (*set_eitrsel)(struct sxe_hw *hw, u32 value); + void (*ring_irq_interval_set)(struct sxe_hw *hw, u16 irq_idx, u32 interval); + void (*event_irq_interval_set)(struct sxe_hw * hw, u16 irq_idx, u32 value); + void (*event_irq_auto_clear_set)(struct sxe_hw *hw, u32 value); + void (*ring_irq_map)(struct sxe_hw *hw, bool is_tx, + u16 reg_idx, u16 irq_idx); + void (*event_irq_map)(struct sxe_hw *hw, u8 offset, u16 irq_idx); + void (*ring_irq_enable)(struct sxe_hw * hw, u64 qmask); + u32 (*irq_cause_get)(struct sxe_hw * hw); + void (*event_irq_trigger)(struct sxe_hw * hw); + void (*ring_irq_trigger)(struct sxe_hw *hw, u64 eics); + void (*specific_irq_disable)(struct sxe_hw *hw, u32 value); + void (*specific_irq_enable)(struct sxe_hw *hw, u32 value); + void (*all_irq_disable)(struct sxe_hw *hw); + void (*spp_configure)(struct sxe_hw *hw, u32 value); + s32 (*irq_test)(struct sxe_hw *hw, u32 *icr, bool shared); +}; + +struct sxe_irq_info { + const struct sxe_irq_operations *ops; +}; + +struct sxe_mac_operations { + bool (*link_up_1g_check)(struct sxe_hw *); + bool (*link_state_is_up)(struct sxe_hw *); + u32 (*link_speed_get)(struct sxe_hw *); + void (*link_speed_set)(struct sxe_hw *, u32 speed); + void (*pad_enable)(struct sxe_hw *); + s32 (*fc_enable)(struct sxe_hw *); + void (*crc_configure)(struct sxe_hw *); + void (*loopback_switch)(struct sxe_hw *, bool); + void (*txrx_enable)(struct sxe_hw *hw); + void (*max_frame_set)(struct sxe_hw *, u32); + u32 (*max_frame_get)(struct sxe_hw *); + void (*fc_autoneg_localcap_set)(struct sxe_hw *); + void (*fc_tc_high_water_mark_set)(struct sxe_hw *, u8, u32); + void (*fc_tc_low_water_mark_set)(struct sxe_hw *, u8, u32); + void (*fc_param_init)(struct sxe_hw *); + enum sxe_fc_mode (*fc_current_mode_get)(struct sxe_hw *); + enum sxe_fc_mode (*fc_requested_mode_get)(struct sxe_hw *); + void (*fc_requested_mode_set)(struct sxe_hw *, enum sxe_fc_mode); + bool (*is_fc_autoneg_disabled)(struct sxe_hw *); + void (*fc_autoneg_disable_set)(struct sxe_hw *, bool); +}; + +#define SXE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 + +struct sxe_mac_info { + const struct sxe_mac_operations *ops; + u8 flags; + bool set_lben; + bool auto_restart; +}; + +struct sxe_filter_mac_operations { + u32 (*rx_mode_get)(struct sxe_hw *); + void (*rx_mode_set)(struct sxe_hw *, u32); + u32 (*pool_rx_mode_get)(struct sxe_hw *, u16); + void (*pool_rx_mode_set)(struct sxe_hw *, u32, u16); + void (*rx_lro_enable) (struct sxe_hw *, bool); + void (*rx_udp_frag_checksum_disable) (struct sxe_hw *); + s32 (*uc_addr_add)(struct sxe_hw *, u32, u8 *, u32); + s32 (*uc_addr_del)(struct sxe_hw *, u32); + void (*uc_addr_clear)(struct sxe_hw *); + void (*mta_hash_table_set)(struct sxe_hw *hw, u8 index, u32 value); + void (*mta_hash_table_update)(struct sxe_hw *hw, u8 reg_idx, u8 bit_idx); + void (*fc_mac_addr_set)(struct sxe_hw *hw, u8 
*mac_addr); + + void (*mc_filter_enable)(struct sxe_hw *); + + void (*mc_filter_disable)(struct sxe_hw *hw); + + void (*rx_nfs_filter_disable)(struct sxe_hw *); + void (*ethertype_filter_set)(struct sxe_hw *, u8, u32); + + void (*vt_ctrl_configure)(struct sxe_hw *hw, u8 num_vfs); + +#ifdef SXE_WOL_CONFIGURE + void (*wol_mode_set)(struct sxe_hw *hw, u32 wol_status); + void (*wol_mode_clean)(struct sxe_hw *hw); + void (*wol_status_set)(struct sxe_hw *hw); +#endif + + void (*vt_disable)(struct sxe_hw *hw); + + s32 (*uc_addr_pool_enable)(struct sxe_hw *hw, u8 rar_idx, u8 pool_idx); +}; + +struct sxe_filter_mac { + const struct sxe_filter_mac_operations *ops; +}; + +struct sxe_filter_vlan_operations { + u32 (*pool_filter_read)(struct sxe_hw *, u16); + void (*pool_filter_write)(struct sxe_hw *, u16, u32); + u32 (*pool_filter_bitmap_read)(struct sxe_hw *, u16); + void (*pool_filter_bitmap_write)(struct sxe_hw *, u16, u32); + void (*filter_array_write)(struct sxe_hw *, u16, u32); + u32 (*filter_array_read)(struct sxe_hw *, u16); + void (*filter_array_clear)(struct sxe_hw *); + void (*filter_switch)(struct sxe_hw *,bool); + void (*untagged_pkts_rcv_switch)(struct sxe_hw *, u32, bool); + s32 (*filter_configure)(struct sxe_hw *, u32, u32, bool, bool); +}; + +struct sxe_filter_vlan { + const struct sxe_filter_vlan_operations *ops; +}; + +struct sxe_filter_info { + struct sxe_filter_mac mac; + struct sxe_filter_vlan vlan; +}; + +struct sxe_dbu_operations { + void (*rx_pkt_buf_size_configure)(struct sxe_hw *, u8, u32, u16); + void (*rx_pkt_buf_switch)(struct sxe_hw *, bool); + void (*rx_multi_ring_configure)(struct sxe_hw *, u8, bool, bool); + void (*rss_key_set_all)(struct sxe_hw *, u32 *); + void (*rss_redir_tbl_set_all)(struct sxe_hw *, u8 *); + void (*rx_cap_switch_on)(struct sxe_hw *); + void (*rss_hash_pkt_type_set)(struct sxe_hw *, u32); + void (*rss_hash_pkt_type_update)(struct sxe_hw *, u32); + void (*rss_rings_used_set)(struct sxe_hw *, u32, u16, u16); + void (*lro_ack_switch)(struct sxe_hw *, bool); + void (*vf_rx_switch)(struct sxe_hw *, u32, u32, bool); + + s32 (*fnav_mode_init)(struct sxe_hw *, u32, u32); + s32 (*fnav_specific_rule_mask_set)(struct sxe_hw *, + union sxe_fnav_rule_info *); + s32 (*fnav_specific_rule_add)(struct sxe_hw *, + union sxe_fnav_rule_info *, + u16, u8); + s32 (*fnav_specific_rule_del)(struct sxe_hw *, + union sxe_fnav_rule_info *, u16); + s32 (*fnav_sample_hash_cmd_get)(struct sxe_hw *, + u8, u32, u8, u64 *); + void (*fnav_sample_stats_reinit)(struct sxe_hw *hw); + void (*fnav_sample_hash_set)(struct sxe_hw *hw, u64 hash); + s32 (*fnav_single_sample_rule_del)(struct sxe_hw *,u32); + + void (*ptp_init)(struct sxe_hw *); + void (*ptp_freq_adjust)(struct sxe_hw *, u32); + void (*ptp_systime_init)(struct sxe_hw *); + u64 (*ptp_systime_get)(struct sxe_hw *); + void (*ptp_tx_timestamp_get)(struct sxe_hw *, u32 *ts_sec, u32 *ts_ns); + void (*ptp_timestamp_mode_set)(struct sxe_hw *, bool, u32, u32); + void (*ptp_rx_timestamp_clear)(struct sxe_hw *); + u64 (*ptp_rx_timestamp_get)(struct sxe_hw *); + bool (*ptp_is_rx_timestamp_valid)(struct sxe_hw *); + void (*ptp_timestamp_enable)(struct sxe_hw *); + + void (*tx_pkt_buf_switch)(struct sxe_hw *, bool); + + void (*dcb_tc_rss_configure)(struct sxe_hw *hw, u16 rss_i); + + void (*tx_pkt_buf_size_configure)(struct sxe_hw *, u8); + + void (*rx_cap_switch_off)(struct sxe_hw *); + u32 (*rx_pkt_buf_size_get)(struct sxe_hw *, u8); + void (*rx_func_switch_on)(struct sxe_hw *hw); + + void (*tx_ring_disable)(struct sxe_hw *, u8, unsigned 
long); + void (*rx_ring_disable)(struct sxe_hw *, u8, unsigned long); + + u32 (*tx_dbu_fc_status_get)(struct sxe_hw *hw); +}; + +struct sxe_dbu_info { + const struct sxe_dbu_operations *ops; +}; + + +struct sxe_dma_operations { + void (*rx_dma_ctrl_init)(struct sxe_hw *); + void (*rx_ring_disable)(struct sxe_hw *, u8); + void (*rx_ring_switch)(struct sxe_hw *, u8, bool); + void (*rx_ring_switch_not_polling)(struct sxe_hw *, u8, bool); + void (*rx_ring_desc_configure)(struct sxe_hw *, u32, u64, u8); + void (*rx_desc_thresh_set)(struct sxe_hw *, u8); + void (*rx_rcv_ctl_configure)(struct sxe_hw *, u8, u32, u32); + void (*rx_lro_ctl_configure)(struct sxe_hw *, u8, u32); + u32 (*rx_desc_ctrl_get)(struct sxe_hw *, u8); + void (*rx_dma_lro_ctl_set)(struct sxe_hw *); + void (*rx_drop_switch)(struct sxe_hw *, u8, bool); + void (*rx_tph_update)(struct sxe_hw *hw, u8 ring_idx, u8 cpu); + + void (*tx_enable)(struct sxe_hw *); + void (*tx_multi_ring_configure)(struct sxe_hw *, u8, u16, bool, u16); + void (*tx_ring_desc_configure)(struct sxe_hw *, u32, u64, u8); + void (*tx_desc_thresh_set)(struct sxe_hw *, u8, u32, u32, u32); + void (*tx_ring_switch)(struct sxe_hw *, u8, bool); + void (*tx_ring_switch_not_polling)(struct sxe_hw *, u8, bool); + void (*tx_pkt_buf_thresh_configure)(struct sxe_hw *, u8, bool); + u32 (*tx_desc_ctrl_get)(struct sxe_hw *, u8); + void (*tx_ring_info_get)(struct sxe_hw *, u8, u32 *, u32 *); + void (*tx_desc_wb_thresh_clear)(struct sxe_hw *, u8); + + void (*vlan_tag_strip_switch)(struct sxe_hw *, u16, bool); + void (*tx_vlan_tag_set)(struct sxe_hw *, u16, u16, u32); + void (*tx_vlan_tag_clear)(struct sxe_hw *, u32); + void (*tx_tph_update)(struct sxe_hw *hw, u8 ring_idx, u8 cpu); + + void (*tph_switch)(struct sxe_hw *hw, bool is_enable); + + void (*dcb_rx_bw_alloc_configure)(struct sxe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc, + u8 max_priority); + void (*dcb_tx_desc_bw_alloc_configure)(struct sxe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + void (*dcb_tx_data_bw_alloc_configure)(struct sxe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc, + u8 max_priority); + void (*dcb_pfc_configure)(struct sxe_hw *hw, u8 pfc_en, u8 *prio_tc, + u8 max_priority); + void (*dcb_tc_stats_configure)(struct sxe_hw *hw); + void (*dcb_rx_up_tc_map_set)(struct sxe_hw *hw, u8 tc); + void (*dcb_rx_up_tc_map_get)(struct sxe_hw *hw, u8 *map); + void (*dcb_rate_limiter_clear)(struct sxe_hw *hw, u8 ring_max); + + void (*vt_pool_loopback_switch)(struct sxe_hw *hw, bool is_enable); + u32 (*rx_pool_get)(struct sxe_hw *hw, u8 reg_idx); + u32 (*tx_pool_get)(struct sxe_hw *hw, u8 reg_idx); + void (*tx_pool_set)(struct sxe_hw *hw, u8 reg_idx, u32 bitmap); + void (*rx_pool_set)(struct sxe_hw *hw, u8 reg_idx, u32 bitmap); + + void (*vf_tx_desc_addr_clear)(struct sxe_hw *hw, u8 vf_idx, u8 ring_per_pool); + void (*pool_mac_anti_spoof_set)(struct sxe_hw *hw, u8 vf_idx, bool status); + void (*pool_vlan_anti_spoof_set)(struct sxe_hw *hw, u8 vf_idx, bool status); + void (*spoof_count_enable)(struct sxe_hw *hw, u8 reg_idx, u8 bit_index); + void (*pool_rx_ring_drop_enable)(struct sxe_hw *hw, u8 vf_idx, + u16 pf_vlan, u8 ring_per_pool); + + void (*max_dcb_memory_window_set)(struct sxe_hw *hw, u32 value); + void (*dcb_tx_ring_rate_factor_set)(struct sxe_hw *hw, u32 ring_idx, u32 rate); + + void (*vf_tx_ring_disable)(struct sxe_hw *hw, u8 ring_per_pool, u8 vf_idx); + void (*all_ring_disable)(struct sxe_hw *hw, u32 ring_max); + void 
(*tx_ring_tail_init)(struct sxe_hw *hw, u8 reg_idx); +}; + +struct sxe_dma_info { + const struct sxe_dma_operations *ops; +}; + +struct sxe_sec_operations { + void (*ipsec_rx_ip_store)(struct sxe_hw *hw, __be32 *ip_addr, u8 ip_len, u8 ip_idx); + void (*ipsec_rx_spi_store)(struct sxe_hw *hw, __be32 spi, u8 ip_idx, u16 idx); + void (*ipsec_rx_key_store)(struct sxe_hw *hw, u32 *key, u8 key_len, u32 salt, u32 mode, u16 idx); + void (*ipsec_tx_key_store)(struct sxe_hw *hw, u32 *key, u8 key_len, u32 salt, u16 idx); + void (*ipsec_sec_data_stop)(struct sxe_hw *hw, bool is_linkup); + void (*ipsec_engine_start)(struct sxe_hw *hw, bool is_linkup); + void (*ipsec_engine_stop)(struct sxe_hw *hw, bool is_linkup); + bool (*ipsec_offload_is_disable)(struct sxe_hw *hw); + void (*ipsec_sa_disable)(struct sxe_hw *hw); +}; + +struct sxe_sec_info { + const struct sxe_sec_operations *ops; +}; + +struct sxe_stat_operations { + void (*stats_clear)(struct sxe_hw *); + void (*stats_get)(struct sxe_hw *, struct sxe_mac_stats *); + + u32 (*tx_packets_num_get)(struct sxe_hw *hw); + u32 (*unsecurity_packets_num_get)(struct sxe_hw *hw); + u32 (*mac_stats_dump)(struct sxe_hw *, u32 *, u32); + u32 (*tx_dbu_to_mac_stats)(struct sxe_hw *hw); +}; + +struct sxe_stat_info { + const struct sxe_stat_operations *ops; +}; + +struct sxe_mbx_operations { + void (*init)(struct sxe_hw *hw); + + s32 (*msg_send)(struct sxe_hw *hw, u32 *msg, u16 len, u16 index); + s32 (*msg_rcv)(struct sxe_hw *hw, u32 *msg, u16 len, u16 index); + + bool (*req_check)(struct sxe_hw *hw, u8 vf_idx); + bool (*ack_check)(struct sxe_hw *hw, u8 vf_idx); + bool (*rst_check)(struct sxe_hw *hw, u8 vf_idx); + + void (*mbx_mem_clear)(struct sxe_hw *hw, u8 vf_idx); +}; + +struct sxe_mbx_stats { + u32 send_msgs; + u32 rcv_msgs; + + u32 reqs; + u32 acks; + u32 rsts; +}; + +struct sxe_mbx_info { + const struct sxe_mbx_operations *ops; + struct sxe_mbx_stats stats; + u32 retry; + u32 interval; + u32 msg_len; +}; + +struct sxe_pcie_operations { + void (*vt_mode_set)(struct sxe_hw *hw, u32 value); +}; + +struct sxe_pcie_info { + const struct sxe_pcie_operations *ops; +}; + +enum sxe_hw_state { + SXE_HW_STOP, + SXE_HW_FAULT, +}; + +enum sxe_fc_mode { + SXE_FC_NONE = 0, + SXE_FC_RX_PAUSE, + SXE_FC_TX_PAUSE, + SXE_FC_FULL, + SXE_FC_DEFAULT, +}; + +struct sxe_fc_info { + u32 high_water[MAX_TRAFFIC_CLASS]; + u32 low_water[MAX_TRAFFIC_CLASS]; + u16 pause_time; + bool strict_ieee; + bool disable_fc_autoneg; + u16 send_xon; + enum sxe_fc_mode current_mode; + enum sxe_fc_mode requested_mode; +}; + +struct sxe_fc_nego_mode { + u32 adv_sym; + u32 adv_asm; + u32 lp_sym; + u32 lp_asm; + +}; + +struct sxe_hdc_operations { + s32 (*pf_lock_get)(struct sxe_hw *, u32); + void (*pf_lock_release)(struct sxe_hw *, u32); + bool (*is_fw_over_set)(struct sxe_hw *); + u32 (*fw_ack_header_rcv)(struct sxe_hw *); + void (*packet_send_done)(struct sxe_hw *); + void (*packet_header_send)(struct sxe_hw *, u32); + void (*packet_data_dword_send)(struct sxe_hw *, u16, u32); + u32 (*packet_data_dword_rcv)(struct sxe_hw *, u16); + u32 (*fw_status_get)(struct sxe_hw *); + void (*drv_status_set)(struct sxe_hw *, u32); + u32 (*irq_event_get)(struct sxe_hw *); + void (*irq_event_clear)(struct sxe_hw *, u32); + void (*fw_ov_clear)(struct sxe_hw *); + u32 (*channel_state_get)(struct sxe_hw *); + void (*resource_clean)(struct sxe_hw *); +}; + +struct sxe_hdc_info { + u32 pf_lock_val; + const struct sxe_hdc_operations *ops; +}; + +struct sxe_phy_operations { + s32 (*reg_write)(struct sxe_hw *hw, s32 prtad, u32 
reg_addr, + u32 device_type, u16 phy_data); + s32 (*reg_read)(struct sxe_hw *hw, s32 prtad, u32 reg_addr, + u32 device_type, u16 *phy_data); + s32 (*identifier_get)(struct sxe_hw *hw, u32 prtad, u32 *id); + s32 (*link_cap_get)(struct sxe_hw *hw, u32 prtad, u32 *speed); + s32 (*reset)(struct sxe_hw *hw, u32 prtad); +}; + +struct sxe_phy_reg_info { + const struct sxe_phy_operations *ops; +}; + +struct sxe_hw { + u8 __iomem *reg_base_addr; + + void *adapter; + void *priv; + unsigned long state; + void (*fault_handle)(void *priv); + u32 (*reg_read)(const volatile void *reg); + void (*reg_write)(u32 value, volatile void *reg); + + struct sxe_hw_setup setup; + struct sxe_irq_info irq; + struct sxe_mac_info mac; + struct sxe_filter_info filter; + struct sxe_dbu_info dbu; + struct sxe_dma_info dma; + struct sxe_sec_info sec; + struct sxe_stat_info stat; + struct sxe_fc_info fc; + + struct sxe_mbx_info mbx; + struct sxe_pcie_info pcie; + struct sxe_hdc_info hdc; + struct sxe_phy_reg_info phy; +}; + +u16 sxe_mac_reg_num_get(void); + +void sxe_hw_fault_handle(struct sxe_hw *hw); + +bool sxe_device_supports_autoneg_fc(struct sxe_hw *hw); + +void sxe_hw_ops_init(struct sxe_hw *hw); + +u32 sxe_hw_rss_key_get_by_idx(struct sxe_hw *hw, u8 reg_idx); + +bool sxe_hw_is_rss_enabled(struct sxe_hw *hw); + +u32 sxe_hw_rss_field_get(struct sxe_hw *hw); + +static inline bool sxe_is_hw_fault(struct sxe_hw *hw) +{ + return test_bit(SXE_HW_FAULT, &hw->state); +} + +static inline void sxe_hw_fault_handle_init(struct sxe_hw *hw, + void (*handle)(void *), void *priv) +{ + hw->priv = priv; + hw->fault_handle = handle; + + return; +} + +static inline void sxe_hw_reg_handle_init(struct sxe_hw *hw, + u32 (*read)(const volatile void *), + void (*write)(u32, volatile void *)) +{ + hw->reg_read = read; + hw->reg_write = write; + + return; +} + +#ifdef SXE_DPDK + +void sxe_hw_crc_strip_config(struct sxe_hw *hw, bool keep_crc); + +void sxe_hw_stats_seq_clean(struct sxe_hw *hw, struct sxe_mac_stats *stats); + +void sxe_hw_hdc_drv_status_set(struct sxe_hw *hw, u32 value); + +s32 sxe_hw_nic_reset(struct sxe_hw *hw); + +u16 sxe_hw_fc_pause_time_get(struct sxe_hw *hw); + +void sxe_hw_fc_pause_time_set(struct sxe_hw *hw, u16 pause_time); + +void sxe_fc_autoneg_localcap_set(struct sxe_hw *hw); + +u32 sxe_hw_fc_tc_high_water_mark_get(struct sxe_hw *hw, u8 tc_idx); + +u32 sxe_hw_fc_tc_low_water_mark_get(struct sxe_hw *hw, u8 tc_idx); + +u16 sxe_hw_fc_send_xon_get(struct sxe_hw *hw); + +void sxe_hw_fc_send_xon_set(struct sxe_hw *hw, u16 send_xon); + +u32 sxe_hw_rx_mode_get(struct sxe_hw *hw); + +void sxe_hw_rx_mode_set(struct sxe_hw *hw, u32 filter_ctrl); + +void sxe_hw_specific_irq_enable(struct sxe_hw *hw, u32 value); + +void sxe_hw_specific_irq_disable(struct sxe_hw *hw, u32 value); + +void sxe_hw_irq_general_reg_set(struct sxe_hw *hw, u32 value); + +u32 sxe_hw_irq_general_reg_get(struct sxe_hw *hw); + +void sxe_hw_event_irq_map(struct sxe_hw *hw, u8 offset, u16 irq_idx); + +void sxe_hw_ring_irq_map(struct sxe_hw *hw, bool is_tx, + u16 reg_idx, u16 irq_idx); + +void sxe_hw_ring_irq_interval_set(struct sxe_hw *hw, + u16 irq_idx, u32 interval); + +void sxe_hw_event_irq_auto_clear_set(struct sxe_hw *hw, u32 value); + +void sxe_hw_all_irq_disable(struct sxe_hw *hw); + +void sxe_hw_ring_irq_auto_disable(struct sxe_hw *hw, + bool is_msix); + +u32 sxe_hw_irq_cause_get(struct sxe_hw *hw); + +void sxe_hw_pending_irq_write_clear(struct sxe_hw *hw, u32 value); + +u32 sxe_hw_ring_irq_switch_get(struct sxe_hw *hw, u8 idx); + +void 
sxe_hw_ring_irq_switch_set(struct sxe_hw *hw, u8 idx, u32 value); + +s32 sxe_hw_uc_addr_add(struct sxe_hw *hw, u32 rar_idx, + u8 *addr, u32 pool_idx); + +s32 sxe_hw_uc_addr_del(struct sxe_hw *hw, u32 index); + +u32 sxe_hw_uta_hash_table_get(struct sxe_hw *hw, u8 reg_idx); + +void sxe_hw_uta_hash_table_set(struct sxe_hw *hw, + u8 reg_idx, u32 value); + +void sxe_hw_mta_hash_table_set(struct sxe_hw *hw, + u8 index, u32 value); + +void sxe_hw_mc_filter_enable(struct sxe_hw *hw); + +void sxe_hw_vlan_filter_array_write(struct sxe_hw *hw, + u16 reg_index, u32 value); + +u32 sxe_hw_vlan_filter_array_read(struct sxe_hw *hw, u16 reg_index); + +void sxe_hw_vlan_filter_switch(struct sxe_hw *hw, bool is_enable); + +u32 sxe_hw_vlan_type_get(struct sxe_hw *hw); + +void sxe_hw_vlan_type_set(struct sxe_hw *hw, u32 value); + +void sxe_hw_vlan_ext_vet_write(struct sxe_hw *hw, u32 value); + +void sxe_hw_vlan_tag_strip_switch(struct sxe_hw *hw, + u16 reg_index, bool is_enable); + +void sxe_hw_txctl_vlan_type_set(struct sxe_hw *hw, u32 value); + +u32 sxe_hw_txctl_vlan_type_get(struct sxe_hw *hw); + +u32 sxe_hw_ext_vlan_get(struct sxe_hw *hw); + +void sxe_hw_ext_vlan_set(struct sxe_hw *hw, u32 value); + +void sxe_hw_pf_rst_done_set(struct sxe_hw *hw); + +u32 sxe_hw_all_regs_group_num_get(void); + +void sxe_hw_all_regs_group_read(struct sxe_hw *hw, u32 *data); + +s32 sxe_hw_fc_enable(struct sxe_hw *hw); + +bool sxe_hw_is_fc_autoneg_disabled(struct sxe_hw *hw); + +void sxe_hw_fc_status_get(struct sxe_hw *hw, + bool *rx_pause_on, bool *tx_pause_on); + +void sxe_hw_fc_requested_mode_set(struct sxe_hw *hw, + enum sxe_fc_mode mode); + +void sxe_hw_fc_tc_high_water_mark_set(struct sxe_hw *hw, + u8 tc_idx, u32 mark); + +void sxe_hw_fc_tc_low_water_mark_set(struct sxe_hw *hw, + u8 tc_idx, u32 mark); + +void sxe_hw_fc_autoneg_disable_set(struct sxe_hw *hw, + bool is_disabled); + +u32 sxe_hw_rx_pkt_buf_size_get(struct sxe_hw *hw, u8 pb); + +void sxe_hw_ptp_init(struct sxe_hw *hw); + +void sxe_hw_ptp_timestamp_mode_set(struct sxe_hw *hw, + bool is_l2, u32 tsctl, u32 tses); + +void sxe_hw_ptp_timestamp_enable(struct sxe_hw *hw); + +void sxe_hw_ptp_time_inc_stop(struct sxe_hw *hw); + +void sxe_hw_ptp_rx_timestamp_clear(struct sxe_hw *hw); + +void sxe_hw_ptp_timestamp_disable(struct sxe_hw *hw); + +bool sxe_hw_ptp_is_rx_timestamp_valid(struct sxe_hw *hw); + +u64 sxe_hw_ptp_rx_timestamp_get(struct sxe_hw *hw); + +void sxe_hw_ptp_tx_timestamp_get(struct sxe_hw *hw, + u32 *ts_sec, u32 *ts_ns); + +u64 sxe_hw_ptp_systime_get(struct sxe_hw *hw); + +void sxe_hw_rss_cap_switch(struct sxe_hw *hw, bool is_on); + +void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key); + +void sxe_hw_rss_field_set(struct sxe_hw *hw, u32 rss_field); + +void sxe_hw_rss_redir_tbl_set_all(struct sxe_hw *hw, u8 *redir_tbl); + +u32 sxe_hw_rss_redir_tbl_get_by_idx(struct sxe_hw *hw, u16); + +void sxe_hw_rss_redir_tbl_set_by_idx(struct sxe_hw *hw, + u16 reg_idx, u32 value); + +void sxe_hw_rx_dma_ctrl_init(struct sxe_hw *hw); + +void sxe_hw_mac_max_frame_set(struct sxe_hw *hw, u32 max_frame); + +void sxe_hw_rx_udp_frag_checksum_disable(struct sxe_hw *hw); + +void sxe_hw_rx_ip_checksum_offload_switch(struct sxe_hw *hw, + bool is_on); + +void sxe_hw_rx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on); + +void sxe_hw_rx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on); + +void sxe_hw_rx_ring_desc_configure(struct sxe_hw *hw, + u32 desc_mem_len, u64 desc_dma_addr, + u8 reg_idx); + +void sxe_hw_rx_rcv_ctl_configure(struct sxe_hw *hw, u8 
reg_idx, + u32 header_buf_len, u32 pkg_buf_len + ); + +void sxe_hw_rx_drop_switch(struct sxe_hw *hw, u8 idx, bool is_enable); + +void sxe_hw_rx_desc_thresh_set(struct sxe_hw *hw, u8 reg_idx); + +void sxe_hw_rx_lro_ack_switch(struct sxe_hw *hw, bool is_on); + +void sxe_hw_rx_dma_lro_ctrl_set(struct sxe_hw *hw); + +void sxe_hw_rx_nfs_filter_disable(struct sxe_hw *hw); + +void sxe_hw_rx_lro_enable(struct sxe_hw *hw, bool is_enable); + +void sxe_hw_rx_lro_ctl_configure(struct sxe_hw *hw, + u8 reg_idx, u32 max_desc); +void sxe_hw_loopback_switch(struct sxe_hw *hw, bool is_enable); + +void sxe_hw_rx_cap_switch_off(struct sxe_hw *hw); + +void sxe_hw_tx_ring_info_get(struct sxe_hw *hw, + u8 idx, u32 *head, u32 *tail); + +void sxe_hw_tx_ring_switch(struct sxe_hw *hw, u8 reg_idx, bool is_on); + +void sxe_hw_tx_ring_switch_not_polling(struct sxe_hw *hw, u8 reg_idx, bool is_on); + +void sxe_hw_rx_queue_desc_reg_configure(struct sxe_hw *hw, + u8 reg_idx, u32 rdh_value, + u32 rdt_value); + +u32 sxe_hw_hdc_fw_status_get(struct sxe_hw *hw); + +s32 sxe_hw_hdc_lock_get(struct sxe_hw *hw, u32 trylock); + +void sxe_hw_hdc_lock_release(struct sxe_hw *hw, u32 retry_cnt); + +bool sxe_hw_hdc_is_fw_over_set(struct sxe_hw *hw); + +void sxe_hw_hdc_fw_ov_clear(struct sxe_hw *hw); + +u32 sxe_hw_hdc_fw_ack_header_get(struct sxe_hw *hw); + +void sxe_hw_hdc_packet_send_done(struct sxe_hw *hw); + +void sxe_hw_hdc_packet_header_send(struct sxe_hw *hw, u32 value); + +void sxe_hw_hdc_packet_data_dword_send(struct sxe_hw *hw, + u16 dword_index, u32 value); + +u32 sxe_hw_hdc_packet_data_dword_rcv(struct sxe_hw *hw, + u16 dword_index); + +u32 sxe_hw_hdc_channel_state_get(struct sxe_hw *hw); + +u32 sxe_hw_pending_irq_read_clear(struct sxe_hw *hw); + +void sxe_hw_all_ring_disable(struct sxe_hw *hw, u32 ring_max); + +void sxe_hw_tx_ring_head_init(struct sxe_hw *hw, u8 reg_idx); + +void sxe_hw_tx_ring_tail_init(struct sxe_hw *hw, u8 reg_idx); + +void sxe_hw_tx_enable(struct sxe_hw *hw); + +void sxe_hw_tx_desc_thresh_set( + struct sxe_hw *hw, + u8 reg_idx, + u32 wb_thresh, + u32 host_thresh, + u32 prefech_thresh); + +void sxe_hw_tx_pkt_buf_switch(struct sxe_hw *hw, bool is_on); + +void sxe_hw_tx_pkt_buf_size_configure(struct sxe_hw *hw, u8 num_pb); + +void sxe_hw_tx_pkt_buf_thresh_configure(struct sxe_hw *hw, + u8 num_pb, bool dcb_enable); + +void sxe_hw_tx_ring_desc_configure(struct sxe_hw *hw, + u32 desc_mem_len, + u64 desc_dma_addr, u8 reg_idx); + +void sxe_hw_mac_txrx_enable(struct sxe_hw *hw); + +void sxe_hw_rx_cap_switch_on(struct sxe_hw *hw); + +void sxe_hw_mac_pad_enable(struct sxe_hw *hw); + +bool sxe_hw_is_link_state_up(struct sxe_hw *hw); + +u32 sxe_hw_link_speed_get(struct sxe_hw *hw); + +void sxe_hw_fc_base_init(struct sxe_hw *hw); + +void sxe_hw_stats_get(struct sxe_hw *hw, struct sxe_mac_stats *stats); + +void sxe_hw_rxq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value); + +void sxe_hw_txq_stat_map_set(struct sxe_hw *hw, u8 idx, u32 value); + +void sxe_hw_uc_addr_clear(struct sxe_hw *hw); + +void sxe_hw_vt_disable(struct sxe_hw *hw); + +void sxe_hw_stats_regs_clean(struct sxe_hw *hw); + +void sxe_hw_vlan_ext_type_set(struct sxe_hw *hw, u32 value); + +void sxe_hw_link_speed_set(struct sxe_hw *hw, u32 speed); + +void sxe_hw_crc_configure(struct sxe_hw *hw); + +void sxe_hw_vlan_filter_array_clear(struct sxe_hw *hw); + +void sxe_hw_no_snoop_disable(struct sxe_hw *hw); + +void sxe_hw_dcb_rate_limiter_clear(struct sxe_hw *hw, u8 ring_max); + +s32 sxe_hw_pfc_enable(struct sxe_hw *hw, u8 tc_idx); + +void 
sxe_hw_dcb_vmdq_mq_configure(struct sxe_hw *hw, u8 num_pools); + +void sxe_hw_dcb_vmdq_default_pool_configure(struct sxe_hw *hw, + u8 default_pool_enabled, + u8 default_pool_idx); + +void sxe_hw_dcb_vmdq_up_2_tc_configure(struct sxe_hw *hw, + u8 *tc_arr); + +void sxe_hw_dcb_vmdq_vlan_configure(struct sxe_hw *hw, + u8 num_pools); + +void sxe_hw_dcb_vmdq_pool_configure(struct sxe_hw *hw, + u8 pool_idx, u16 vlan_id, + u64 pools_map); + +void sxe_hw_dcb_rx_configure(struct sxe_hw *hw, bool is_vt_on, + u8 sriov_active, u8 pg_tcs); + +void sxe_hw_dcb_tx_configure(struct sxe_hw *hw, bool is_vt_on, u8 pg_tcs); + +void sxe_hw_pool_xmit_enable(struct sxe_hw *hw, u16 reg_idx, u8 pool_num); + +void sxe_hw_rx_pkt_buf_size_set(struct sxe_hw *hw, u8 tc_idx, u16 pbsize); + +void sxe_hw_dcb_tc_stats_configure(struct sxe_hw *hw, + u8 tc_count, bool vmdq_active); + +void sxe_hw_dcb_rx_bw_alloc_configure(struct sxe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc, + u8 max_priority); + +void sxe_hw_dcb_tx_desc_bw_alloc_configure(struct sxe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +void sxe_hw_dcb_tx_data_bw_alloc_configure(struct sxe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc, + u8 max_priority); + +void sxe_hw_dcb_pfc_configure(struct sxe_hw *hw, + u8 pfc_en, u8 *prio_tc, + u8 max_priority); + +void sxe_hw_vmdq_mq_configure(struct sxe_hw *hw); + +void sxe_hw_vmdq_default_pool_configure(struct sxe_hw *hw, + u8 default_pool_enabled, + u8 default_pool_idx); + +void sxe_hw_vmdq_vlan_configure(struct sxe_hw *hw, + u8 num_pools, u32 rx_mode); + +void sxe_hw_vmdq_pool_configure(struct sxe_hw *hw, + u8 pool_idx, u16 vlan_id, + u64 pools_map); + +void sxe_hw_vmdq_loopback_configure(struct sxe_hw *hw); + +void sxe_hw_tx_multi_queue_configure(struct sxe_hw *hw, + bool vmdq_enable, bool sriov_enable, u16 pools_num); + +void sxe_hw_dcb_max_mem_window_set(struct sxe_hw *hw, u32 value); + +void sxe_hw_dcb_tx_ring_rate_factor_set(struct sxe_hw *hw, + u32 ring_idx, u32 rate); + +void sxe_hw_mbx_init(struct sxe_hw *hw); + +void sxe_hw_vt_ctrl_cfg(struct sxe_hw *hw, u8 num_vfs); + +void sxe_hw_tx_pool_bitmap_set(struct sxe_hw *hw, + u8 reg_idx, u32 bitmap); + +void sxe_hw_rx_pool_bitmap_set(struct sxe_hw *hw, + u8 reg_idx, u32 bitmap); + +void sxe_hw_vt_pool_loopback_switch(struct sxe_hw *hw, + bool is_enable); + +void sxe_hw_mac_pool_clear(struct sxe_hw *hw, u8 rar_idx); + +s32 sxe_hw_uc_addr_pool_enable(struct sxe_hw *hw, + u8 rar_idx, u8 pool_idx); + +void sxe_hw_pcie_vt_mode_set(struct sxe_hw *hw, u32 value); + +u32 sxe_hw_pcie_vt_mode_get(struct sxe_hw *hw); + +void sxe_hw_pool_mac_anti_spoof_set(struct sxe_hw *hw, + u8 vf_idx, bool status); + +void sxe_rx_fc_threshold_set(struct sxe_hw *hw); + +void sxe_hw_rx_multi_ring_configure(struct sxe_hw *hw, + u8 tcs, bool is_4Q, + bool sriov_enable); + +void sxe_hw_rx_queue_mode_set(struct sxe_hw *hw, u32 mrqc); + +bool sxe_hw_vf_rst_check(struct sxe_hw *hw, u8 vf_idx); + +bool sxe_hw_vf_req_check(struct sxe_hw *hw, u8 vf_idx); + +bool sxe_hw_vf_ack_check(struct sxe_hw *hw, u8 vf_idx); + +s32 sxe_hw_rcv_msg_from_vf(struct sxe_hw *hw, u32 *msg, + u16 msg_len, u16 index); + +s32 sxe_hw_send_msg_to_vf(struct sxe_hw *hw, u32 *msg, + u16 msg_len, u16 index); + +void sxe_hw_mbx_mem_clear(struct sxe_hw *hw, u8 vf_idx); + +u32 sxe_hw_pool_rx_mode_get(struct sxe_hw *hw, u16 pool_idx); + +void sxe_hw_pool_rx_mode_set(struct sxe_hw *hw, + u32 vmolr, u16 pool_idx); + +void 
sxe_hw_tx_vlan_tag_clear(struct sxe_hw *hw, u32 vf); + +u32 sxe_hw_rx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx); + +u32 sxe_hw_tx_pool_bitmap_get(struct sxe_hw *hw, u8 reg_idx); + +void sxe_hw_pool_rx_ring_drop_enable(struct sxe_hw *hw, u8 vf_idx, + u16 pf_vlan, u8 ring_per_pool); + +void sxe_hw_spoof_count_enable(struct sxe_hw *hw, + u8 reg_idx, u8 bit_index); + +u32 sxe_hw_tx_vlan_insert_get(struct sxe_hw *hw, u32 vf); + +bool sxe_hw_vt_status(struct sxe_hw *hw); + +s32 sxe_hw_vlvf_slot_find(struct sxe_hw *hw, u32 vlan, bool vlvf_bypass); + +u32 sxe_hw_vlan_pool_filter_read(struct sxe_hw *hw, u16 reg_index); + +void sxe_hw_mirror_vlan_set(struct sxe_hw *hw, u8 idx,u32 lsb, u32 msb); + +void sxe_hw_mirror_virtual_pool_set(struct sxe_hw *hw, u8 idx,u32 lsb, u32 msb); + +void sxe_hw_mirror_ctl_set(struct sxe_hw *hw, u8 rule_id, + u8 mirror_type, u8 dst_pool, bool on); + +void sxe_hw_mirror_rule_clear(struct sxe_hw *hw, u8 rule_id); + +u32 sxe_hw_mac_max_frame_get(struct sxe_hw *hw); + +void sxe_hw_mta_hash_table_update(struct sxe_hw *hw, + u8 reg_idx, u8 bit_idx); + +void sxe_hw_vf_queue_drop_enable(struct sxe_hw *hw, u8 vf_idx, + u8 ring_per_pool); +void sxe_hw_fc_mac_addr_set(struct sxe_hw *hw, u8 *mac_addr); + +void sxe_hw_macsec_enable(struct sxe_hw *hw, bool is_up, u32 tx_mode, + u32 rx_mode, u32 pn_trh); + +void sxe_hw_macsec_disable(struct sxe_hw *hw, bool is_up); + +void sxe_hw_macsec_txsc_set(struct sxe_hw *hw, u32 scl, u32 sch); + +void sxe_hw_macsec_rxsc_set(struct sxe_hw *hw, u32 scl, u32 sch, u16 pi); + +void sxe_hw_macsec_tx_sa_configure(struct sxe_hw *hw, u8 sa_idx, + u8 an, u32 pn, u32 *keys); + +void sxe_hw_macsec_rx_sa_configure(struct sxe_hw *hw, u8 sa_idx, + u8 an, u32 pn, u32 *keys); +void sxe_hw_vt_pool_loopback_switch(struct sxe_hw *hw, + bool is_enable); + +#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_FILTER_CTRL +void sxe_hw_fnav_rx_pkt_buf_size_reset(struct sxe_hw *hw, u32 pbsize); + +void sxe_hw_fnav_flex_mask_set(struct sxe_hw *hw, u16 flex_mask); + +void sxe_hw_fnav_ipv6_mask_set(struct sxe_hw *hw, u16 src_mask, u16 dst_mask); + +s32 sxe_hw_fnav_flex_offset_set(struct sxe_hw *hw, u16 offset); + +void sxe_hw_fivetuple_filter_add(struct rte_eth_dev *dev, + struct sxe_fivetuple_node_info *filter); + +void sxe_hw_fivetuple_filter_del(struct sxe_hw *hw, u16 reg_index); + +void sxe_hw_ethertype_filter_add(struct sxe_hw *hw, + u8 reg_index, u16 ethertype, u16 queue); + +void sxe_hw_ethertype_filter_del(struct sxe_hw *hw, u8 filter_type); + +void sxe_hw_syn_filter_add(struct sxe_hw *hw, u16 queue, u8 priority); + +void sxe_hw_syn_filter_del(struct sxe_hw *hw); + +void sxe_hw_rss_key_set_all(struct sxe_hw *hw, u32 *rss_key); +#endif + +void sxe_hw_fnav_enable(struct sxe_hw *hw, u32 fnavctrl); + +s32 sxe_hw_fnav_sample_rules_table_reinit(struct sxe_hw *hw); + +s32 sxe_hw_fnav_specific_rule_add(struct sxe_hw *hw, + union sxe_fnav_rule_info *input, + u16 soft_id, u8 queue); + +s32 sxe_hw_fnav_specific_rule_del(struct sxe_hw *hw, + union sxe_fnav_rule_info *input, + u16 soft_id); + +void sxe_hw_fnav_sample_rule_configure(struct sxe_hw *hw, + u8 flow_type, u32 hash_value, u8 queue); + +void sxe_hw_rss_redir_tbl_reg_write(struct sxe_hw *hw, + u16 reg_idx, u32 value); + +u32 sxe_hw_fnav_port_mask_get(__be16 src_port_mask, __be16 dst_port_mask); + +s32 sxe_hw_fnav_specific_rule_mask_set(struct sxe_hw *hw, + union sxe_fnav_rule_info *input_mask); + +s32 sxe_hw_vlan_filter_configure(struct sxe_hw *hw, + u32 vid, u32 pool, + bool vlan_on, bool vlvf_bypass); + +void 
sxe_hw_ptp_systime_init(struct sxe_hw *hw); + +#endif +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ipsec.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ipsec.c new file mode 100644 index 000000000000..49a982bf78f8 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ipsec.c @@ -0,0 +1,1241 @@ + +#ifdef SXE_IPSEC_CONFIGURE + +#include +#include +#include + +#include "sxe_ipsec.h" +#include "sxe_tx_proc.h" +#include "sxe_ring.h" +#include "sxe_sriov.h" + +static const char ipsec_aes_name[] = "rfc4106(gcm(aes))"; + +#ifdef CONFIG_XFRM +static inline bool sxe_need_tx_ipsec_offload(struct sk_buff *skb) +{ + struct sec_path *sp = skb->sp; + bool ret = true; + + if (!sp || !sp->olen || (sp->len != sp->olen)) { + ret = false; + } + + return ret; +} + +static struct xfrm_state *sxe_ipsec_rx_sa_match(struct sxe_ipsec_context *ipsec, + __be32 spi, u8 proto, __be32 *daddr, u8 daddr_len) +{ + struct sxe_rx_sa *sa; + struct xfrm_state *xs = NULL; + + rcu_read_lock(); + hash_for_each_possible_rcu(ipsec->rx_table_list, sa, hlist, + (__force u32) spi) { + if (spi == sa->xs->id.spi && + proto == sa->xs->id.proto && + !memcmp(daddr, &(sa->xs->id.daddr), daddr_len)) { + xs = sa->xs; + xfrm_state_hold(xs); + break; + } + } + rcu_read_unlock(); + return xs; + +} + +#else +STATIC inline bool sxe_need_tx_ipsec_offload(struct sk_buff *skb) +{ + return false; +} + +STATIC struct xfrm_state *sxe_ipsec_rx_sa_match(struct sxe_ipsec_context *ipsec, + __be32 spi, u8 proto, __be32 *daddr, u8 daddr_len) +{ + return NULL; +} + +#endif + +static s32 sxe_ipsec_tx_offload_param_valid( + struct sxe_adapter *adapter, + struct sk_buff *skb, + struct sxe_tx_sa *sa, + u16 *sa_idx, struct xfrm_state **xfrm_state) +{ + s32 ret = -SXE_ERR_PARAM; + u16 idx; + struct sec_path *path; + struct xfrm_state *xs; + + path = skb_sec_path(skb); + if (unlikely(!path->len)) { + LOG_DEV_ERR("security path len:0 invalid.\n"); + goto l_out; + } + + xs = xfrm_input_state(skb); + if (unlikely(!xs)) { + LOG_DEV_ERR("security input xs NULL.\n"); + goto l_out; + } + + *xfrm_state = xs; + idx = xs->xso.offload_handle - SXE_IPSEC_TX_INDEX_BASE; + if (idx >= SXE_IPSEC_SA_CNT_MAX) { + LOG_DEV_ERR("invalid offload_handle:%lu idx:%d.\n", + xs->xso.offload_handle, idx); + goto l_out; + } + + if (!test_bit(SXE_IPSEC_SA_ENTRY_USED, &sa[idx].status)) { + LOG_DEV_ERR("tx_table[%d] not used.\n", idx); + goto l_out; + } + + *sa_idx = idx; + + LOG_INFO("idx:%d tx ipsec offload valid passed\n", + idx); + ret = 0; + +l_out: + return ret; +} + +s32 sxe_tx_ipsec_offload(struct sxe_ring *tx_ring, + struct sxe_tx_buffer *first, + struct sxe_tx_context_desc *ctxt_desc) +{ + u16 sa_idx; + s32 ret = 0; + struct sxe_adapter *adapter = netdev_priv(tx_ring->netdev); + struct sxe_ipsec_context *ipsec = &adapter->ipsec; + struct sxe_tx_sa *sa = ipsec->tx_table; + struct xfrm_state *xfrm_state = NULL; + u32 tucmd_ipsec = 0; + + if (!sxe_need_tx_ipsec_offload(first->skb)) { + LOG_DEBUG("ring[%u] no need offload IPsec.\n", tx_ring->idx); + goto l_out; + } + + ret = sxe_ipsec_tx_offload_param_valid(adapter, first->skb, sa, &sa_idx, &xfrm_state); + if (ret) { + LOG_ERROR("ring[%d ]tx ipsec valid failed.\n", tx_ring->idx); + goto l_out; + } + + first->tx_features |= SXE_TX_FEATURE_IPSEC; + + if (xfrm_state->id.proto == IPPROTO_ESP) { + tucmd_ipsec = SXE_TX_CTXTD_TUCMD_IPSEC_TYPE_ESP | + SXE_TX_CTXTD_TUCMD_L4T_TCP; + if (first->protocol == htons(ETH_P_IP)) { + tucmd_ipsec |= SXE_TX_CTXTD_TUCMD_IPV4; + } + + if (!skb_is_gso(first->skb)) { + const u32 
auth_len = SXE_IPSEC_AUTH_BIT_LEN / CHAR_BITS; + u8 pad_len; + + ret = skb_copy_bits(first->skb, first->skb->len - SXE_IPSEC_PADLEN_OFFSET, + &pad_len, 1); + if (unlikely(ret)) { + LOG_ERROR("auth_len:%d offset:%d copy skb " + "failed.(err:%d)\n", + auth_len, + first->skb->len - SXE_IPSEC_PADLEN_OFFSET, + ret); + goto l_out; + } + tucmd_ipsec |= (SXE_IPSEC_PADLEN_OFFSET + pad_len); + } + } else { + if (first->protocol == htons(ETH_P_IP)) { + tucmd_ipsec |= SXE_TX_CTXTD_TUCMD_IPV4; + } + } + + if (sa[sa_idx].encrypt) { + tucmd_ipsec |= SXE_TX_CTXTD_TUCMD_IPSEC_ENCRYPT_EN; + } + + sxe_ctxt_desc_sa_idx_set(ctxt_desc, sa_idx); + + sxe_ctxt_desc_tucmd_set(ctxt_desc, tucmd_ipsec); + +l_out: + return ret; +} + +void sxe_rx_ipsec_proc(struct sxe_ring *tx_ring, + union sxe_rx_data_desc *desc, + struct sk_buff *skb) +{ + s32 ret = 0; + struct sxe_adapter *adapter = netdev_priv(tx_ring->netdev); + struct sxe_ipsec_context *ipsec = &adapter->ipsec; + __le16 pkt_info = desc->wb.lower.lo_dword.hs_rss.pkt_info; + struct iphdr *ip4_hdr = NULL; + struct ipv6hdr *ip6_hdr = NULL; + void *daddr = NULL; + unsigned long daddr_len; + u8 *sec_hdr = NULL; + struct xfrm_state *xs = NULL; + struct xfrm_offload *offload = NULL; + __be32 spi; + u8 proto; + + if (!sxe_status_err_check(desc, SXE_RXD_STAT_SECP)) { + LOG_DEBUG("not security packet, no need parse " + "security header.\n"); + goto l_out; + } + + if (pkt_info & cpu_to_le16(SXE_RXDADV_PKTTYPE_IPV4)) { + ip4_hdr = (struct iphdr *)(skb->data + ETH_HLEN); + daddr = &ip4_hdr->daddr; + daddr_len = sizeof(ip4_hdr->daddr); + sec_hdr = (u8 *)ip4_hdr + ip4_hdr->ihl * SXE_IP_HEAD_LEN_UNIT; + } else if (pkt_info & cpu_to_le16(SXE_RXDADV_PKTTYPE_IPV6)) { + ip6_hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN); + daddr = &ip6_hdr->daddr; + daddr_len = sizeof(ip6_hdr->daddr); + sec_hdr = (u8 *)ip6_hdr + sizeof(struct ipv6hdr); + } else { + ret = -SXE_ERR_DEVICE_NOT_SUPPORTED; + LOG_ERROR("sxe security not support L3 protocol:0x%x.\n", + desc->wb.lower.lo_dword.hs_rss.pkt_info); + goto l_out; + }; + + if (pkt_info & cpu_to_le16(SXE_RXDADV_PKTTYPE_IPSEC_ESP)) { + spi = ((struct ip_esp_hdr *)sec_hdr)->spi; + proto = IPPROTO_ESP; + } else if (pkt_info & cpu_to_le16(SXE_RXDADV_PKTTYPE_IPSEC_AH)) { + spi = ((struct ip_auth_hdr *)sec_hdr)->spi; + proto = IPPROTO_AH; + } else { + ret = -SXE_ERR_DEVICE_NOT_SUPPORTED; + LOG_ERROR("sxe security not support security protocol:0x%x.\n", + desc->wb.lower.lo_dword.hs_rss.pkt_info); + goto l_out; + } + + xs = sxe_ipsec_rx_sa_match(ipsec, spi, proto, daddr, *(u8 *)&daddr_len); + if (!xs) { + ret = -SXE_ERR_IPSEC_SA_STATE_NOT_EXSIT; + LOG_ERROR("spi:0x%x, proto:0x%x daddr:%pI6 daddr_len:%lu" + "not matched sw rx sa entry.(err:%d)", + spi, proto, daddr, daddr_len, ret); + goto l_out; + } + + skb->sp = secpath_dup(skb->sp); + if (unlikely(!skb->sp)) { + LOG_INFO("skb security path null.\n"); + goto l_out; + } + + skb->sp->xvec[skb->sp->len++] = xs; + skb->sp->olen++; + + offload = xfrm_offload(skb); + offload->flags = CRYPTO_DONE; + if (sxe_status_err_check(desc, SXE_RXDADV_ERR_IPSEC_AUTH_FAILED)) { + offload->status = (proto == IPPROTO_ESP) ? 
+ CRYPTO_TRANSPORT_ESP_AUTH_FAILED : CRYPTO_TRANSPORT_AH_AUTH_FAILED; + } else { + offload->status = CRYPTO_SUCCESS; + } + + atomic64_inc(&ipsec->rx_ipsec); + +l_out: + return; +} + +static s32 sxe_ipsec_param_valid(struct xfrm_state *xs, + struct sxe_virtual_context *vt) +{ + s32 ret = -EINVAL; + struct net_device *dev = xs->xso.dev; + struct sxe_adapter *adapter = netdev_priv(dev); + + if ((xs->id.proto != IPPROTO_ESP) && + (xs->id.proto != IPPROTO_AH)) { + LOG_DEV_ERR("unsupported security protocol:0x%x.\n", + xs->id.proto); + goto l_out; + } + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (xs->calg) { + LOG_DEV_ERR("unsupported compression offload.\n"); + goto l_out; + } + } else { + if (vt->num_vfs && (vt->bridge_mode != BRIDGE_MODE_VEPA)) { + LOG_ERROR("num_vfs:%d mode:%d does not support ipsec add.\n", + vt->num_vfs, vt->bridge_mode); + goto l_out; + } + } + + ret = 0; + LOG_INFO("num_vfs:%d proto:%d flags:%d ipsec param check passed\n", + vt->num_vfs, xs->id.proto, + xs->xso.flags); + +l_out: + return ret; +} + +static s32 sxe_ipsec_sa_idx_get(struct sxe_ipsec_context *ipsec, bool is_rx) +{ + s32 ret = -ENOSPC; + u16 i; + + if (is_rx) { + if (ipsec->rx_sa_cnt == SXE_IPSEC_SA_CNT_MAX) { + LOG_ERROR("ipsec rx sa cnt reached limit:%u.\n", + SXE_IPSEC_SA_CNT_MAX); + goto l_out; + } + + for (i = 0; i < SXE_IPSEC_SA_CNT_MAX; i++) { + if (!test_and_set_bit(SXE_IPSEC_SA_ENTRY_USED, + &ipsec->rx_table[i].status)) { + ret = i; + break; + } + } + } else { + if (ipsec->tx_sa_cnt == SXE_IPSEC_SA_CNT_MAX) { + LOG_ERROR("ipsec tx sa cnt reached limit:%u.\n", + SXE_IPSEC_SA_CNT_MAX); + goto l_out; + } + + for (i = 0; i < SXE_IPSEC_SA_CNT_MAX; i++) { + if (!test_and_set_bit(SXE_IPSEC_SA_ENTRY_USED, + &ipsec->tx_table[i].status)) { + ret = i; + break; + } + } + } + +l_out: + return ret; +} + +static s32 sxe_ipsec_key_salt_parse(struct xfrm_state *xs, + u32 *key, u32 *salt) +{ + s32 ret = 0; + s8 *xs_key; + unsigned long len; + struct net_device *dev = xs->xso.dev; + struct sxe_adapter *adapter = netdev_priv(dev); + + if (!xs->aead) { + ret = -EINVAL; + LOG_DEV_ERR("ipsec offload algorithm unsupported.(err:%d)\n", ret); + goto l_out; + } + + if (xs->aead->alg_icv_len != SXE_IPSEC_AUTH_BIT_LEN) { + ret = -EINVAL; + LOG_DEV_ERR("ipsec offload icv len:%u " + "unsupported.(err:%d)\n", + xs->aead->alg_icv_len, ret); + goto l_out; + } + + if (strcmp(xs->aead->alg_name, ipsec_aes_name)) { + ret = -EINVAL; + LOG_DEV_ERR("unsupported alg name:%s, only support alg:%s.(err:%d)\n", + xs->aead->alg_name, ipsec_aes_name, ret); + goto l_out; + } + + xs_key = xs->aead->alg_key; + len = xs->aead->alg_key_len; + + if (len == SXE_IPSEC_KEY_SALT_BIT_LEN) { + *salt = *(u32 *)(xs_key + SXE_IPSEC_KEY_BYTE_LEN); + } else if (len == SXE_IPSEC_KEY_BIT_LEN) { + LOG_DEV_INFO("ipsec hw offload parameters missing 32 bit salt value.\n"); + *salt = 0; + } else { + ret = -EINVAL; + LOG_DEV_ERR("unsupported key_salt len:%lu.(err:%d)\n", len, ret); + goto l_out; + } + + memcpy(key, xs_key, sizeof(u32) * SXE_IPSEC_KEY_LEN); + + LOG_INFO("ipsec offload flag:0x%x key_salt len:%lu " + "salt:%u key:0x%x%x%x%x.\n", + xs->xso.flags, len, + *salt, key[0], key[1], key[2], key[3]); + +l_out: + return ret; +} + +static void sxe_ipsec_rx_ip_tbl_sync(struct sxe_hw *hw, u16 idx, + __be32 *ip_addr) +{ + hw->sec.ops->ipsec_rx_ip_store(hw, ip_addr, SXE_IPSEC_IP_LEN, idx); + + return; +} + +static void sxe_ipsec_rx_sa_tbl_sync(struct sxe_hw *hw, u16 sa_idx, + __be32 spi, struct sxe_rx_sa *sa) +{ + hw->sec.ops->ipsec_rx_spi_store(hw, spi, sa->ip_idx, sa_idx); + +
hw->sec.ops->ipsec_rx_key_store(hw, sa->key, SXE_IPSEC_KEY_LEN, + sa->salt, sa->mode, sa_idx); + + return; +} + +static void sxe_ipsec_rx_key_sync(struct sxe_hw *hw, u16 sa_idx, + struct sxe_rx_sa *sa) +{ + hw->sec.ops->ipsec_rx_key_store(hw, sa->key, SXE_IPSEC_KEY_LEN, + sa->salt, sa->mode, sa_idx); + + return; +} + +static void sxe_ipsec_tx_sa_tbl_sync(struct sxe_hw *hw, u16 sa_idx, + struct sxe_tx_sa *sa) +{ + hw->sec.ops->ipsec_tx_key_store(hw, sa->key, SXE_IPSEC_KEY_LEN, + sa->salt, sa_idx); + + return; +} + +static s32 sxe_ipsec_rx_sa_entry_fill(struct xfrm_state *xs, + struct sxe_rx_sa *sa_entry) +{ + s32 ret; + struct net_device *net_dev = xs->xso.dev; + struct sxe_adapter *adapter = netdev_priv(net_dev); + + memset(sa_entry, 0, sizeof(*sa_entry)); + + sa_entry->xs = xs; + +#ifdef CONFIG_SXE_FPGA_SINGLE_PORT + sa_entry->ip_idx = 120; +#endif + + if (xs->id.proto == IPPROTO_ESP) { + sa_entry->decrypt = !!((xs->ealg) || (xs->aead)); + } + + ret = sxe_ipsec_key_salt_parse(xs, sa_entry->key, &sa_entry->salt); + if (ret) { + LOG_DEV_ERR("ipsec offload key salt param parse fail.(err:%d)\n", + ret); + goto l_out; + } + + if (xs->props.family == AF_INET6) { + memcpy(sa_entry->ip_addr, &xs->id.daddr.a6, SXE_IPV6_ADDR_SIZE); + } else { + memcpy(&sa_entry->ip_addr[SXE_IPV4_ADDR_SIZE - 1], + &xs->id.daddr.a4, + SXE_IPV4_ADDR_SIZE); + } + + sa_entry->mode = SXE_IPSEC_RXMOD_VALID; + if (sa_entry->xs->id.proto == IPPROTO_ESP) { + sa_entry->mode |= SXE_IPSEC_RXMOD_PROTO_ESP; + } + + if (sa_entry->decrypt) { + sa_entry->mode |= SXE_IPSEC_RXMOD_DECRYPT; + } + + if (sa_entry->xs->props.family == AF_INET6) { + sa_entry->mode |= SXE_IPSEC_RXMOD_IPV6; + } + +l_out: + return ret; +} + +static s32 sxe_ipsec_tx_sa_entry_fill(struct xfrm_state *xs, + struct sxe_tx_sa *sa_entry) +{ + s32 ret; + + memset(sa_entry, 0, sizeof(struct sxe_tx_sa)); + + sa_entry->xs = xs; + + if (xs->id.proto == IPPROTO_ESP) { + if ((xs->ealg) || (xs->aead)) { + sa_entry->encrypt = true; + } + } + + ret = sxe_ipsec_key_salt_parse(xs, sa_entry->key, &sa_entry->salt); + if (ret) { + LOG_DEV_ERR("ipsec offload key salt param parse fail.(err:%d)\n", + ret); + } + + return ret; +} + +static s32 sxe_ipsec_rx_ip_addr_add(struct sxe_ipsec_context *ipsec, + struct sxe_hw *hw, + struct sxe_rx_sa *sa_entry) +{ + s32 empty_idx = -1; + s32 match_idx = -1; + u16 checked = 0; + u16 i; + s32 ret = 0; + struct sxe_adapter *adapter = hw->adapter; + + for (i = 0; i < SXE_IPSEC_IP_CNT_MAX; i++) { + if ((checked < ipsec->rx_sa_cnt) || (empty_idx < 0)) { + if (test_bit(SXE_IPSEC_SA_ENTRY_USED, &ipsec->ip_table[i].status)) { + if (!memcmp(ipsec->ip_table[i].ip_addr, sa_entry->ip_addr, + sizeof(sa_entry->ip_addr))) { + match_idx = i; + break; + } + checked++; + } else if (empty_idx < 0) { + if (!test_and_set_bit(SXE_IPSEC_IP_ENTRY_USED, + &ipsec->ip_table[i].status)) { + empty_idx = i; + } + } + } + } + + if (ipsec->rx_sa_cnt == 0) { + empty_idx = 0; + } + + if (match_idx >= 0) { + if (empty_idx >= 0) { + clear_bit(SXE_IPSEC_IP_ENTRY_USED, + &ipsec->ip_table[empty_idx].status); + } + sa_entry->ip_idx = match_idx; + ipsec->ip_table[match_idx].ref_cnt++; + LOG_INFO("ip addr:%pI6 matched ip_table[%d] ref_cnt:%d.\n", + sa_entry->ip_addr, match_idx, + ipsec->ip_table[match_idx].ref_cnt); + } else if (empty_idx >= 0) { + +#ifdef CONFIG_SXE_FPGA_SINGLE_PORT + sa_entry->ip_idx += empty_idx; + if (sa_entry->ip_idx > 127) { + LOG_ERROR("iptbl_ind %d is too big.\n", sa_entry->ip_idx); + return -ENOSPC; + } +#else + sa_entry->ip_idx = empty_idx; +#endif + 
memcpy(ipsec->ip_table[empty_idx].ip_addr, sa_entry->ip_addr, + sizeof(sa_entry->ip_addr)); + ipsec->ip_table[empty_idx].ref_cnt = 1; + + sxe_ipsec_rx_ip_tbl_sync(hw, empty_idx, sa_entry->ip_addr); + + LOG_INFO("ip addr:%pI6 use new entry:%d.\n", + sa_entry->ip_addr, empty_idx); + } else { + ret = -ENOSPC; + LOG_DEV_ERR("ip table full, has no space for new ip addr:%pI6." + "(err:%d)\n", sa_entry->ip_addr, ret); + } + + return ret; +} + +static void sxe_ipsec_rx_sa_add(struct sxe_ipsec_context *ipsec, + struct sxe_hw *hw, struct xfrm_state *xs, + struct sxe_rx_sa *sa_entry, u16 sa_idx) +{ + memcpy(&ipsec->rx_table[sa_idx], sa_entry, sizeof(struct sxe_rx_sa)); + + sxe_ipsec_rx_sa_tbl_sync(hw, sa_idx, xs->id.spi, sa_entry); + xs->xso.offload_handle = sa_idx + SXE_IPSEC_RX_INDEX_BASE; + + ipsec->rx_sa_cnt++; + + hash_add_rcu(ipsec->rx_table_list, &ipsec->rx_table[sa_idx].hlist, + (__force u32)sa_entry->xs->id.spi); + + LOG_INFO("rx_sa_table[%u] add done, rx_sa_cnt:%u.\n", + sa_idx, ipsec->rx_sa_cnt); + + return; +} + +static void sxe_ipsec_tx_sa_add(struct sxe_ipsec_context *ipsec, + struct sxe_hw *hw, struct xfrm_state *xs, + struct sxe_tx_sa *sa_entry, u16 sa_idx) +{ + memcpy(&ipsec->tx_table[sa_idx], sa_entry, sizeof(struct sxe_tx_sa)); + + sxe_ipsec_tx_sa_tbl_sync(hw, sa_idx, sa_entry); + xs->xso.offload_handle = sa_idx + SXE_IPSEC_TX_INDEX_BASE; + + ipsec->tx_sa_cnt++; + + LOG_INFO("tx_sa_table[%u] add done, tx_sa_cnt:%u.\n", + sa_idx, ipsec->tx_sa_cnt); + + return; +} + +static s32 sxe_ipsec_tx_xs_add(struct sxe_ipsec_context *ipsec, + struct sxe_hw *hw, + struct xfrm_state *xs) +{ + s32 ret; + u16 sa_idx; + struct sxe_tx_sa sa_entry; + struct sxe_adapter *adapter = hw->adapter; + + ret = sxe_ipsec_sa_idx_get(ipsec, false); + if (ret < 0) { + LOG_DEV_ERR("tx sa table no space.(err:%d)\n", ret); + goto l_out; + } + + sa_idx = (u16)ret; + + ret = sxe_ipsec_tx_sa_entry_fill(xs, &sa_entry); + if (ret) { + LOG_ERROR("ipsec offload param parse fail.(err:%d)\n", ret); + goto clear_used_xs; + } + + sa_entry.status = ipsec->tx_table[sa_idx].status; + + sxe_ipsec_tx_sa_add(ipsec, hw, xs, &sa_entry, sa_idx); + +l_out: + return ret; + +clear_used_xs: + clear_bit(SXE_IPSEC_SA_ENTRY_USED, &ipsec->tx_table[sa_idx].status); + + return ret; + +} + +static s32 sxe_ipsec_rx_xs_add(struct sxe_ipsec_context *ipsec, + struct sxe_hw *hw, + struct xfrm_state *xs) +{ + struct sxe_rx_sa sa_entry; + struct sxe_adapter *adapter = hw->adapter; + u16 sa_idx; + s32 ret; + + ret = sxe_ipsec_sa_idx_get(ipsec, true); + if (ret < 0) { + LOG_DEV_ERR("rx sa table no space.(err:%d)\n", ret); + goto l_out; + } + + sa_idx = (u16)ret; + + ret = sxe_ipsec_rx_sa_entry_fill(xs, &sa_entry); + if (ret) { + LOG_ERROR("ipsec offload param parse fail.(err:%d)\n", ret); + goto clear_used_xs; + } + + sa_entry.status = ipsec->rx_table[sa_idx].status; + + ret = sxe_ipsec_rx_ip_addr_add(ipsec, hw, &sa_entry); + if (ret) { + LOG_ERROR("ip addr:%pI6 add to ip table fail.(err:%d)\n", + sa_entry.ip_addr, ret); + goto clear_used_xs; + } + +#ifdef CONFIG_SXE_FPGA_SINGLE_PORT + if ((sa_entry.ip_idx > 127) || (sa_idx > 23)) { + LOG_ERROR("iptbl_ind %d or sa_idx %d is too big.\n", + sa_entry.ip_idx, sa_idx); + return -ENOSPC; + } +#endif + + sxe_ipsec_rx_sa_add(ipsec, hw, xs, &sa_entry, sa_idx); + +l_out: + return ret; + +clear_used_xs: + clear_bit(SXE_IPSEC_SA_ENTRY_USED, &ipsec->rx_table[sa_idx].status); + + return ret; +} + +static s32 sxe_ipsec_state_add(struct xfrm_state *xs) +{ + s32 ret; + struct net_device *net_dev = xs->xso.dev; + struct 
sxe_adapter *adapter = netdev_priv(net_dev); + struct sxe_ipsec_context *ipsec = &adapter->ipsec; + struct sxe_hw *hw = &adapter->hw; + struct sxe_virtual_context *vt = &adapter->vt_ctxt; + + ret = sxe_ipsec_param_valid(xs, vt); + if (ret) { + LOG_ERROR("ipsec offload param invalid.(err:%d)\n", + ret); + goto l_out; + } + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + ret = sxe_ipsec_rx_xs_add(ipsec, hw, xs); + } else { + ret = sxe_ipsec_tx_xs_add(ipsec, hw, xs); + } + + if (ret) { + LOG_ERROR("ipsec flag:0x%x sa add fail.(err:%d)\n", + xs->xso.flags, ret); + goto l_out; + } + + if (!(adapter->cap & SXE_IPSEC_ENABLED)) { + hw->sec.ops->ipsec_engine_start(hw, adapter->link.is_up); + adapter->cap |= SXE_IPSEC_ENABLED; + LOG_INFO("ipsec engine started.\n"); + } + +l_out: + return ret; +} + +static void sxe_ipsec_rx_sa_entry_clear(struct sxe_rx_sa *sa_entry) +{ + u32 clear_len = sizeof(struct sxe_rx_sa) - sizeof(sa_entry->status); + + memset(sa_entry, 0, clear_len); + + return; +} + +static void sxe_ipsec_tx_sa_entry_clear(struct sxe_tx_sa *sa_entry) +{ + u32 clear_len = sizeof(struct sxe_tx_sa) - sizeof(sa_entry->status); + + memset(sa_entry->key, 0, clear_len); + + return; +} + +static void sxe_ipsec_rx_xs_del(struct sxe_ipsec_context *ipsec, + struct sxe_hw *hw, + unsigned long offload_handle) +{ + struct sxe_rx_sa *sa_entry; + struct sxe_rx_ip *ip_entry; + struct sxe_adapter *adapter = hw->adapter; + u16 sa_idx = offload_handle - SXE_IPSEC_RX_INDEX_BASE; + u8 ip_idx; + __be32 spi; + + sa_entry = &ipsec->rx_table[sa_idx]; + + if (!test_bit(SXE_IPSEC_SA_ENTRY_USED, &sa_entry->status)) { + LOG_DEV_ERR("invalid sa_idx:%u status:0x%lx not in used.\n", + sa_idx, sa_entry->status); + goto l_out; + } + + ip_idx = sa_entry->ip_idx; + spi = sa_entry->xs->id.spi; + + sxe_ipsec_rx_sa_entry_clear(sa_entry); + sxe_ipsec_rx_sa_tbl_sync(hw, sa_idx, spi, sa_entry); + hash_del_rcu(&sa_entry->hlist); + + ip_entry = &ipsec->ip_table[ip_idx]; + if (ip_entry->ref_cnt > 0) { + ip_entry->ref_cnt--; + if (!ip_entry->ref_cnt) { + LOG_INFO("del rx_ip_table[%d] ip_addr:%pI6.\n", + ip_idx, ip_entry->ip_addr); + + memset(ip_entry->ip_addr, 0, sizeof(ip_entry->ip_addr)); + sxe_ipsec_rx_ip_tbl_sync(hw, ip_idx, ip_entry->ip_addr); + clear_bit(SXE_IPSEC_IP_ENTRY_USED, &ip_entry->status); + } + } + + ipsec->rx_sa_cnt--; + clear_bit(SXE_IPSEC_SA_ENTRY_USED, &sa_entry->status); + + LOG_INFO("del rx_sa_table[%u] success ip_table[%u] " + "ref_cnt:%u rx_sa_cnt:%u.\n", + sa_idx, ip_idx, + ip_entry->ref_cnt, + ipsec->rx_sa_cnt); + +l_out: + return; +} + +static void sxe_ipsec_tx_xs_del(struct sxe_ipsec_context *ipsec, + struct sxe_hw *hw, + unsigned long offload_handle) +{ + struct sxe_tx_sa *sa_entry; + struct sxe_adapter *adapter = hw->adapter; + u16 sa_idx = offload_handle - SXE_IPSEC_TX_INDEX_BASE; + sa_entry = &ipsec->tx_table[sa_idx]; + + if (!test_bit(SXE_IPSEC_SA_ENTRY_USED, &sa_entry->status)) { + LOG_DEV_ERR("invalid sa_idx:%u status:0x%lx not in used.\n", + sa_idx, sa_entry->status); + goto l_out; + } + + sa_entry = &ipsec->tx_table[sa_idx]; + sxe_ipsec_tx_sa_entry_clear(sa_entry); + sxe_ipsec_tx_sa_tbl_sync(hw, sa_idx, sa_entry); + ipsec->tx_sa_cnt--; + + clear_bit(SXE_IPSEC_SA_ENTRY_USED, &sa_entry->status); + + LOG_INFO("del tx_sa_table[%u] success tx_sa_cnt:%u.\n", + sa_idx, ipsec->tx_sa_cnt); + +l_out: + return; +} + +static void sxe_ipsec_state_delete(struct xfrm_state *xs) +{ + struct net_device *net_dev = xs->xso.dev; + struct sxe_adapter *adapter = netdev_priv(net_dev); + struct sxe_ipsec_context *ipsec = 
&adapter->ipsec; + struct sxe_hw *hw = &adapter->hw; + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + sxe_ipsec_rx_xs_del(ipsec, hw, xs->xso.offload_handle); + } else { + sxe_ipsec_tx_xs_del(ipsec, hw, xs->xso.offload_handle); + } + + if ((ipsec->rx_sa_cnt == 0) && (ipsec->tx_sa_cnt == 0)) { + adapter->cap &= ~SXE_IPSEC_ENABLED; + hw->sec.ops->ipsec_engine_stop(hw, adapter->link.is_up); + LOG_INFO("ipsec engine stopped.\n"); + } + + return; +} + +static bool sxe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + bool ret = true; + + if (xs->props.family == AF_INET) { + if (ip_hdr(skb)->ihl != 5) { + LOG_ERROR("sxe ipsec offload unsupport ipv4 " + "header with option, hdr len:%d.\n", + ip_hdr(skb)->ihl); + ret = false; + } + } else { + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) { + LOG_ERROR("sxe ipsec offload unsupport ipv6 " + "header with ext hdr\n"); + ret = false; + } + } + + return ret; +} + +static void sxe_ipsec_hw_table_clear(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + u16 idx; + struct sxe_rx_sa rx_sa_entry = {}; + struct sxe_tx_sa tx_sa_entry = {}; + + hw->sec.ops->ipsec_sa_disable(hw); + + for (idx = 0; idx < SXE_IPSEC_SA_CNT_MAX; idx++) { + sxe_ipsec_rx_key_sync(hw, idx, &rx_sa_entry); + + sxe_ipsec_tx_sa_tbl_sync(hw, idx, &tx_sa_entry); + } + + return; +} + +static const struct xfrmdev_ops sxe_xfrmdev_ops = { + .xdo_dev_offload_ok = sxe_ipsec_offload_ok, + .xdo_dev_state_add = sxe_ipsec_state_add, + .xdo_dev_state_delete = sxe_ipsec_state_delete, +}; + +void sxe_ipsec_offload_init(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + struct sxe_ipsec_context *ipsec = &adapter->ipsec; + u32 size; + + if (hw->sec.ops->ipsec_offload_is_disable(hw)) { + LOG_INFO("ipsec rx/tx offload disable, no need init ipsec.\n"); + goto l_out; + } + + hash_init(ipsec->rx_table_list); + + size = sizeof(struct sxe_rx_sa) * SXE_IPSEC_SA_CNT_MAX; + ipsec->rx_table = kzalloc(size, GFP_KERNEL); + if (ipsec->rx_table == NULL) { + LOG_DEV_ERR("ipsec rx sa table mem:%uB alloc fail.\n", size); + goto l_out; + } + + size = sizeof(struct sxe_rx_ip) * SXE_IPSEC_IP_CNT_MAX; + ipsec->ip_table = kzalloc(size, GFP_KERNEL); + if (ipsec->ip_table == NULL) { + LOG_DEV_ERR("ipsec rx ip table mem:%uB alloc fail.\n", size); + goto l_free_rx_sa_table; + } + + size = sizeof(struct sxe_tx_sa) * SXE_IPSEC_SA_CNT_MAX; + ipsec->tx_table = kzalloc(size, GFP_KERNEL); + if (ipsec->tx_table == NULL) { + LOG_DEV_ERR("ipsec tx sa table mem:%uB alloc fail.\n", size); + goto l_free_rx_ip_table; + } + + ipsec->rx_sa_cnt = 0; + ipsec->tx_sa_cnt = 0; + + sxe_ipsec_hw_table_clear(adapter); + + adapter->netdev->xfrmdev_ops = &sxe_xfrmdev_ops; + + LOG_INFO("ipsec init done.\n"); + +l_out: + return; + +l_free_rx_ip_table: + SXE_KFREE(ipsec->ip_table); + +l_free_rx_sa_table: + SXE_KFREE(ipsec->rx_table); + return; +} + +void sxe_ipsec_table_restore(struct sxe_adapter *adapter) +{ + struct sxe_ipsec_context *ipsec = &adapter->ipsec; + struct sxe_hw *hw = &adapter->hw; + u16 idx; + + if (!(adapter->cap & SXE_IPSEC_ENABLED)) { + LOG_INFO("sxe ipsec disabled, no need reload.\n"); + goto l_out; + } + + sxe_ipsec_hw_table_clear(adapter); + + for(idx = 0; idx < SXE_IPSEC_SA_CNT_MAX; idx++) { + struct sxe_rx_sa *rx_sa_entry = &ipsec->rx_table[idx]; + struct sxe_tx_sa *tx_sa_entry = &ipsec->tx_table[idx]; + + if (test_bit(SXE_IPSEC_SA_ENTRY_USED, &rx_sa_entry->status)) { + if (rx_sa_entry->mode & SXE_IPSEC_RXTXMOD_VF) { + struct xfrm_state *xs = rx_sa_entry->xs; + sxe_ipsec_rx_xs_del(ipsec, 
hw, + rx_sa_entry->xs->xso.offload_handle); + SXE_KFREE(xs->aead); + SXE_KFREE(xs); + } else { + sxe_ipsec_rx_sa_tbl_sync(hw, idx, + rx_sa_entry->xs->id.spi,rx_sa_entry); + } + } + + if (test_bit(SXE_IPSEC_SA_ENTRY_USED, &tx_sa_entry->status)) { + if (tx_sa_entry->mode & SXE_IPSEC_RXTXMOD_VF) { + struct xfrm_state *xs = tx_sa_entry->xs; + sxe_ipsec_tx_xs_del(ipsec, hw, + xs->xso.offload_handle); + SXE_KFREE(xs->aead); + SXE_KFREE(xs); + } else { + sxe_ipsec_tx_sa_tbl_sync(hw, idx, tx_sa_entry); + } + } + } + + for (idx = 0; idx < SXE_IPSEC_IP_CNT_MAX; idx++) { + struct sxe_rx_ip *ip_entry = &ipsec->ip_table[idx]; + + if (test_bit(SXE_IPSEC_IP_ENTRY_USED, &ip_entry->status)) { + sxe_ipsec_rx_ip_tbl_sync(hw, idx, ip_entry->ip_addr); + } + } + + LOG_INFO("ipsec table reload done.\n"); + +l_out: + return; +} + +static s32 sxe_vf_ipsec_xs_fill(struct net_device *netdev, + struct xfrm_state *xs, + struct sxe_ipsec_add_msg *msg) +{ + s32 ret; + struct xfrm_algo_desc *algo; + unsigned long aead_len; + + xs->xso.flags = msg->flags; + xs->id.spi = msg->spi; + xs->id.proto = msg->proto; + xs->props.family = msg->family; + + if (xs->props.family == AF_INET6) { + memcpy(&xs->id.daddr.a6, msg->ip_addr, sizeof(xs->id.daddr.a6)); + } else { + memcpy(&xs->id.daddr.a4, msg->ip_addr, sizeof(xs->id.daddr.a4)); + } + + xs->xso.dev = netdev; + + algo = xfrm_aead_get_byname(ipsec_aes_name, SXE_IPSEC_AUTH_BIT_LEN, 1); + if (unlikely(!algo)) { + ret = -ENOENT; + LOG_ERROR("algo desc get fail.(err:%d)\n", ret); + goto l_out; + } + + aead_len = sizeof(*xs->aead) + SXE_IPSEC_KEY_SALT_BYTE_LEN; + xs->aead = kzalloc(aead_len, GFP_KERNEL); + if (unlikely(!xs->aead)) { + ret = -ENOMEM; + LOG_ERROR("algo aead mem:%zu alloc fail.(err:%d)\n", + aead_len, ret); + goto l_out; + } + + xs->props.ealgo = algo->desc.sadb_alg_id; + xs->geniv = algo->uinfo.aead.geniv; + xs->aead->alg_icv_len = SXE_IPSEC_AUTH_BIT_LEN; + xs->aead->alg_key_len = SXE_IPSEC_KEY_SALT_BIT_LEN; + memcpy(xs->aead->alg_key, msg->key, sizeof(msg->key)); + memcpy(xs->aead->alg_name, ipsec_aes_name, sizeof(ipsec_aes_name)); + + ret = 0; + +l_out: + return ret; +} + +s32 sxe_vf_ipsec_add(struct sxe_adapter *adapter, u32 *msg, u8 vf_idx) +{ + struct sxe_ipsec_context *ipsec = &adapter->ipsec; + struct sxe_ipsec_add_msg *ipsec_msg = (struct sxe_ipsec_add_msg *)msg; + struct xfrm_state *xs; + s32 ret; + u16 sa_idx; + unsigned long offload_handle; + + if ((!adapter->vt_ctxt.vf_info[vf_idx].trusted) || + !(adapter->cap & SXE_VF_IPSEC_ENABLED)) { + ret = -EACCES; + LOG_MSG_WARN(drv, "vf_idx:%u trusted:%d cap:0x%x no perm to " + "add ipsec.(err:%d)\n", + vf_idx, + adapter->vt_ctxt.vf_info[vf_idx].trusted, + adapter->cap, ret); + goto l_out; + } + + if (!(ipsec_msg->flags & XFRM_OFFLOAD_INBOUND)) { + ret = -EOPNOTSUPP; + LOG_ERROR("vf_idx:%u just support rx ipsec offload.(err:%d)\n", + vf_idx, ret); + goto l_out; + } + + xs = kzalloc(sizeof(struct xfrm_state), GFP_KERNEL); + if (unlikely(!xs)) { + ret = -ENOMEM; + LOG_ERROR("vf_idx:%d add ipsec, xs alloc %zuB fail.\n", + vf_idx, sizeof(struct xfrm_state)); + goto l_out; + } + + ret = sxe_vf_ipsec_xs_fill(adapter->netdev, xs, ipsec_msg); + if (ret) { + LOG_ERROR("vf_idx:%u ipsec msg fill xfrm xs fail.(err:%d)\n", + vf_idx, ret); + goto xfrm_state_free; + } + + ret = sxe_ipsec_state_add(xs); + if (ret) { + LOG_ERROR("vf_idx:%u ipsec xfrm xs add fail.(err:%d)\n", + vf_idx, ret); + goto xfrm_aead_free; + } + + offload_handle = xs->xso.offload_handle; + if (offload_handle < SXE_IPSEC_TX_INDEX_BASE) { + sa_idx = 
offload_handle - SXE_IPSEC_RX_INDEX_BASE; + ipsec->rx_table[sa_idx].vf_idx = vf_idx; + ipsec->rx_table[sa_idx].mode |= SXE_IPSEC_RXTXMOD_VF; + } else { + sa_idx = offload_handle - SXE_IPSEC_TX_INDEX_BASE; + ipsec->tx_table[sa_idx].vf_idx = vf_idx; + ipsec->tx_table[sa_idx].mode |= SXE_IPSEC_RXTXMOD_VF; + } + + ipsec_msg->pf_sa_idx = xs->xso.offload_handle; + +l_out: + return ret; + +xfrm_aead_free: + SXE_KFREE(xs->aead); + +xfrm_state_free: + SXE_KFREE(xs); + + return ret; +} + +s32 sxe_vf_ipsec_del(struct sxe_adapter *adapter, u32 *msg, u8 vf_idx) +{ + struct sxe_ipsec_context *ipsec = &adapter->ipsec; + struct sxe_rx_sa *sa_entry; + struct xfrm_state *xs; + struct sxe_ipsec_del_msg *del_msg = (struct sxe_ipsec_del_msg *)msg; + u32 sa_idx = del_msg->pf_sa_idx - SXE_IPSEC_RX_INDEX_BASE; + s32 ret = 0; + + if (!adapter->vt_ctxt.vf_info[vf_idx].trusted) { + ret = -EPERM; + LOG_MSG_ERR(drv, "vf_idx:%u untrusted, no perm del ipsec xs.(err:%d)\n", + vf_idx, ret); + goto l_out; + } + + if (sa_idx > SXE_IPSEC_SA_CNT_MAX) { + ret = -EINVAL; + LOG_MSG_ERR(drv, "vf_idx:%d ipsec msg sa_idx:%u invalid.(err:%d)\n", + vf_idx, sa_idx, ret); + goto l_out; + } + + sa_entry = &ipsec->rx_table[sa_idx]; + if (!test_bit(SXE_IPSEC_SA_ENTRY_USED, &sa_entry->status)) { + LOG_INFO("vf_idx:%u sa_idx:%u not in used.\n", + vf_idx, sa_idx); + goto l_out; + } + + if (!(sa_entry->mode & SXE_IPSEC_RXTXMOD_VF) || + (sa_entry->vf_idx != vf_idx)) { + ret = -ENOENT; + LOG_MSG_ERR(drv, "vf_idx:%u sa_idx:%u error.(err:%d)\n", + vf_idx, sa_idx, ret); + goto l_out; + } + + xs = sa_entry->xs; + sxe_ipsec_state_delete(sa_entry->xs); + + SXE_KFREE(xs->aead); + SXE_KFREE(xs); + + LOG_INFO("vf_idx:%u sa_idx:%u del success.\n", + vf_idx, sa_idx); + +l_out: + return ret; +} + +void sxe_vf_ipsec_entry_clear(struct sxe_adapter *adapter, u32 vf_idx) +{ + struct sxe_ipsec_context *ipsec = &adapter->ipsec; + struct sxe_rx_sa *rx_sa; + struct sxe_tx_sa *tx_sa; + u16 idx; + + for (idx = 0; idx < SXE_IPSEC_SA_CNT_MAX; idx++) { + rx_sa = &ipsec->rx_table[idx]; + tx_sa = &ipsec->tx_table[idx]; + + if (test_bit(SXE_IPSEC_SA_ENTRY_USED, &rx_sa->status) && + (rx_sa->mode & SXE_IPSEC_RXTXMOD_VF) && + (rx_sa->vf_idx == vf_idx)) { + struct xfrm_state *xs = rx_sa->xs; + LOG_INFO("del vf_idx:%u ipsec entry:%u.\n", vf_idx, idx); + sxe_ipsec_state_delete(xs); + SXE_KFREE(xs->aead); + SXE_KFREE(xs); + } + + if (test_bit(SXE_IPSEC_SA_ENTRY_USED, &rx_sa->status) && + (tx_sa->mode & SXE_IPSEC_RXTXMOD_VF) && + (tx_sa->vf_idx == vf_idx)) { + struct xfrm_state *xs = tx_sa->xs; + LOG_INFO("del vf_idx:%u ipsec entry:%u.\n", vf_idx, idx); + sxe_ipsec_state_delete(xs); + SXE_KFREE(xs->aead); + SXE_KFREE(xs); + } + } + + return; +} + +void sxe_ipsec_offload_exit(struct sxe_adapter *adapter) +{ + struct sxe_ipsec_context *ipsec = &adapter->ipsec; + + SXE_KFREE(ipsec->ip_table); + SXE_KFREE(ipsec->rx_table); + SXE_KFREE(ipsec->tx_table); + + LOG_INFO("ipsec exit done.\n"); + + return; +} + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ipsec.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ipsec.h new file mode 100644 index 000000000000..fe0cba7aca93 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ipsec.h @@ -0,0 +1,109 @@ + +#ifndef __SXE_IPSEC_H__ +#define __SXE_IPSEC_H__ + +#include "sxe_ring.h" + +#ifdef CONFIG_SXE_FPGA_SINGLE_PORT +#undef SXE_IPSEC_MAX_SA_COUNT +#undef SXE_IPSEC_MAX_RX_IP_COUNT +#undef SXE_IPSEC_BASE_TX_INDEX +#define SXE_IPSEC_MAX_SA_COUNT 24 +#define SXE_IPSEC_MAX_RX_IP_COUNT 8 +#define SXE_IPSEC_BASE_TX_INDEX 
SXE_IPSEC_MAX_SA_COUNT +#endif + +#define SXE_IPSEC_SA_CNT_MAX (1024) +#define SXE_IPSEC_IP_CNT_MAX (128) + +#define SXE_IPSEC_RX_INDEX_BASE (0) +#define SXE_IPSEC_TX_INDEX_BASE (SXE_IPSEC_SA_CNT_MAX) + +#define SXE_IPSEC_AUTH_BIT_LEN (128) +#define SXE_IPSEC_SA_ENTRY_USED (0x1) +#define SXE_IPSEC_IP_ENTRY_USED (0x1) + +#define SXE_IPSEC_IP_LEN (4) +#define SXE_IPSEC_KEY_LEN (4) +#define SXE_IPSEC_KEY_SALT_BIT_LEN (160) +#define SXE_IPSEC_KEY_BIT_LEN (128) +#define SXE_IPSEC_KEY_SALT_BYTE_LEN (SXE_IPSEC_KEY_SALT_BIT_LEN / 8) +#define SXE_IPSEC_KEY_BYTE_LEN (SXE_IPSEC_KEY_BIT_LEN / 8) + +#define SXE_IPSEC_PADLEN_OFFSET (SXE_IPSEC_KEY_BYTE_LEN + 2) + +#define SXE_IPV4_ADDR_SIZE (4) +#define SXE_IPV6_ADDR_SIZE (16) + +#define SXE_IPSEC_RXMOD_VALID 0x00000001 +#define SXE_IPSEC_RXMOD_PROTO_ESP 0x00000004 +#define SXE_IPSEC_RXMOD_DECRYPT 0x00000008 +#define SXE_IPSEC_RXMOD_IPV6 0x00000010 +#define SXE_IPSEC_RXTXMOD_VF 0x00000020 + +struct sxe_tx_sa { + struct xfrm_state *xs; + u32 key[SXE_IPSEC_KEY_LEN]; + u32 salt; + u32 mode; + bool encrypt; + u16 vf_idx; + unsigned long status; +}; + +struct sxe_rx_sa { + struct hlist_node hlist; + struct xfrm_state *xs; + + u32 key[SXE_IPSEC_KEY_LEN]; + u32 salt; + __be32 ip_addr[SXE_IPSEC_IP_LEN]; + u32 mode; + u8 ip_idx; + + u16 vf_idx; + bool decrypt; + unsigned long status; +}; + +struct sxe_rx_ip { + __be32 ip_addr[SXE_IPSEC_IP_LEN]; + u16 ref_cnt; + unsigned long status; +}; + +struct sxe_ipsec_context { + u16 rx_sa_cnt; + u16 tx_sa_cnt; + atomic64_t rx_ipsec; + + struct sxe_rx_ip *ip_table; + struct sxe_rx_sa *rx_table; + + struct sxe_tx_sa *tx_table; + + DECLARE_HASHTABLE(rx_table_list, 10); +}; + +s32 sxe_tx_ipsec_offload(struct sxe_ring *tx_ring, + struct sxe_tx_buffer *first, + struct sxe_tx_context_desc *ctxt_desc); + +void sxe_rx_ipsec_proc(struct sxe_ring *tx_ring, + union sxe_rx_data_desc *desc, + struct sk_buff *skb); + +void sxe_ipsec_offload_init(struct sxe_adapter *adapter); + +void sxe_ipsec_table_restore(struct sxe_adapter *adapter); + +void sxe_ipsec_offload_exit(struct sxe_adapter *adapter); + +s32 sxe_vf_ipsec_add(struct sxe_adapter *adapter, u32 *msg, u8 vf_idx); + +s32 sxe_vf_ipsec_del(struct sxe_adapter *adapter, u32 *msg, u8 vf_idx); + +void sxe_vf_ipsec_entry_clear(struct sxe_adapter *adapter, u32 vf_idx); + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_irq.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_irq.c new file mode 100644 index 000000000000..fa8d96c44af3 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_irq.c @@ -0,0 +1,1630 @@ + +#include +#include +#include +#include +#include + +#include "sxe.h" +#ifdef HAVE_NO_OVERFLOW_H +#include +#else +#include +#endif +#include "sxe_irq.h" +#include "sxe_pci.h" +#include "sxe_regs.h" +#include "sxe_rx_proc.h" +#include "sxe_tx_proc.h" +#include "sxe_log.h" +#include "sxe_sriov.h" +#include "sxe_monitor.h" +#include "sxe_netdev.h" +#include "sxe_xdp.h" +#include "sxe_host_hdc.h" + +#ifdef SXE_SFP_DEBUG +static unsigned int sw_sfp_los_delay_ms = SXE_SW_SFP_LOS_DELAY_MS; +static unsigned int hw_spp_proc_delay_us = SXE_SPP_PROC_DELAY_US; +#ifndef SXE_TEST +module_param(sw_sfp_los_delay_ms, uint, 0); +MODULE_PARM_DESC(sw_sfp_los_delay_ms, "LOS_N(sdp 1) interrupt software filtering time - default is 200"); + +module_param(hw_spp_proc_delay_us, uint, 0); +MODULE_PARM_DESC(hw_spp_proc_delay_us, "SDP interrupt filtering time - default is 7"); +#endif +#endif + +#ifdef NETIF_NAPI_ADD_API_NEED_3_PARAMS +static inline void +netif_napi_add_compat(struct 
net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + netif_napi_add(dev, napi, poll); +} + +#define netif_napi_add(dev, napi, poll, weight) netif_napi_add_compat(dev, napi, poll, weight) +#endif + +static void sxe_enable_irq(struct sxe_adapter *adapter, bool is_ring, + bool is_flush) +{ + struct sxe_hw *hw = &adapter->hw; + + u32 value = (SXE_EIMS_ENABLE_MASK & ~SXE_EIMS_RTX_QUEUE); + + if (test_bit(SXE_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state)) { + value &= ~SXE_EIMS_LSC; + } + + value |= SXE_EIMS_GPI_SPP1; + value |= SXE_EIMS_GPI_SPP2; + value |= SXE_EIMS_MAILBOX; + value |= SXE_EIMS_ECC; + value |= SXE_EIMS_HDC; + + if ((adapter->cap & SXE_FNAV_SAMPLE_ENABLE) && + !test_bit(SXE_FNAV_REQUIRES_REINIT, &adapter->monitor_ctxt.state)) { + value |= SXE_EIMS_FLOW_NAV; + } + + hw->irq.ops->specific_irq_enable(hw, value); + + if (is_ring) { + hw->irq.ops->ring_irq_enable(hw, ~0); + } + + if (is_flush) { + hw->setup.ops->regs_flush(hw); + } + + return; +} + +static void sxe_irq_num_init(struct sxe_adapter *adapter) +{ + u16 total; + u16 ring_irq; + u16 cpu_cnt = num_online_cpus(); + + ring_irq = max(adapter->rx_ring_ctxt.num, adapter->tx_ring_ctxt.num); + ring_irq = max(ring_irq, adapter->xdp_ring_ctxt.num); + + ring_irq = min_t(u16, ring_irq, cpu_cnt); + + total = ring_irq + SXE_EVENT_IRQ_NUM; + + total = min_t(u16, total, adapter->irq_ctxt.max_irq_num); + + adapter->irq_ctxt.total_irq_num = total; + + adapter->irq_ctxt.ring_irq_num = total - SXE_EVENT_IRQ_NUM; + + LOG_INFO("msi-x interrupt rx_ring_num:%u tx_ring_num:%u " + "xdp_ring_num:%u cpu cnt:%u max_irq_num:%u " + "total_irq_num:%u ring_irq_num:%u event_irq_num:%u \n", + adapter->rx_ring_ctxt.num, + adapter->tx_ring_ctxt.num, + adapter->xdp_ring_ctxt.num, + cpu_cnt, + adapter->irq_ctxt.max_irq_num, + adapter->irq_ctxt.total_irq_num, + adapter->irq_ctxt.ring_irq_num, + SXE_EVENT_IRQ_NUM); + + return; +} + +static void sxe_irq_num_reinit(struct sxe_adapter *adapter) +{ + adapter->irq_ctxt.total_irq_num = SXE_RING_IRQ_MIN_NUM; + adapter->irq_ctxt.ring_irq_num = SXE_RING_IRQ_MIN_NUM; + + LOG_INFO("non-msix interrupt rxr_num:%u txr_num:%u " + "xdp_num:%u max_irq_num:%u total_irq_num:%u " + "ring_irq_num:%u\n", + adapter->rx_ring_ctxt.num, + adapter->tx_ring_ctxt.num, + adapter->xdp_ring_ctxt.num, + adapter->irq_ctxt.max_irq_num, + adapter->irq_ctxt.total_irq_num, + adapter->irq_ctxt.ring_irq_num); + + return; +} + +int sxe_msi_irq_init(struct sxe_adapter *adapter) +{ + int ret; + + ret = pci_enable_msi(adapter->pdev); + if (ret) { + adapter->cap &= ~SXE_MSI_ENABLED; + LOG_DEV_ERR("enable msi interrupt fail. cap:0x%x.(err:%d)\n", + adapter->cap, ret); + } else { + adapter->cap |= SXE_MSI_ENABLED; + LOG_INFO("enable msi irq done ret:%d. cap:0x%x\n", + ret, adapter->cap); + } + + return ret; +} + +s32 sxe_config_space_irq_num_get(struct sxe_adapter *adapter) +{ + int ret = 0; + u16 msix_num; + struct sxe_hw *hw = &adapter->hw; + + msix_num = sxe_read_pci_cfg_word(adapter->pdev, + hw, SXE_PCIE_MSIX_CAPS_OFFSET); + if (msix_num == SXE_READ_CFG_WORD_FAILED) { + ret = -EIO; + LOG_ERROR_BDF("msi-x caps read fail due to adapter removed.(err:%d)\n", + ret); + goto l_out; + } + + msix_num &= SXE_PCIE_MSIX_ENTRY_MASK; + + msix_num++; + + msix_num = (msix_num > SXE_MSIX_IRQ_MAX_NUM) ? 
+ SXE_MSIX_IRQ_MAX_NUM : msix_num; + + adapter->irq_ctxt.max_irq_num = msix_num; + +l_out: + return ret; +} + +void sxe_disable_dcb(struct sxe_adapter *adapter) +{ + if (sxe_dcb_tc_get(adapter) > 1) { + LOG_DEV_WARN("number of DCB TCs exceeds number of available queues." + "disabling DCB support.\n"); + netdev_reset_tc(adapter->netdev); + adapter->cap &= ~SXE_DCB_ENABLE; + adapter->dcb_ctxt.cee_temp_cfg.pfc_mode_enable = false; + adapter->dcb_ctxt.cee_cfg.pfc_mode_enable = false; + } + + sxe_dcb_tc_set(adapter, 0); + adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_ctxt.cee_cfg.num_tcs.pfc_tcs = 1; + + LOG_INFO_BDF("dcb disabled cap:0x%x.\n", adapter->cap); + + return; +} + +static int sxe_disable_sriov(struct sxe_adapter *adapter) +{ + LOG_DEV_WARN("disabling SR-IOV support.\n"); + + sxe_vf_resource_release(adapter); + sxe_vf_disable(adapter); + + pci_disable_sriov(adapter->pdev); + + return 0; +} + +void sxe_disable_rss(struct sxe_adapter *adapter) +{ + LOG_DEV_WARN("disabling RSS support.\n"); + + adapter->ring_f.rss_limit = 1; + adapter->cap &= ~SXE_RSS_ENABLE; + + LOG_INFO_BDF("rss disabled rss_limit:%u cap:0x%x.\n", + adapter->ring_f.rss_limit, adapter->cap); + + return; +} + +static bool sxe_is_irq_bind_cpu(struct sxe_adapter *adapter) +{ + if (!(adapter->cap & SXE_DCB_ENABLE) && + !(adapter->cap & SXE_SRIOV_ENABLE) && + (adapter->cap & SXE_RSS_ENABLE) && + (adapter->cap & SXE_FNAV_SAMPLE_ENABLE)) { + LOG_INFO("cap:0x%x need alloc memory in cpu node.\n", + adapter->cap); + return true; + } else { + LOG_INFO("cap:0x%x no need alloc memory in cpu node.\n", + adapter->cap); + return false; + } +} + +static s32 sxe_msix_irq_init(struct sxe_adapter *adapter) +{ + u16 i; + s32 ret; + u16 total = adapter->irq_ctxt.total_irq_num; + + adapter->irq_ctxt.msix_entries = kcalloc(total, + sizeof(struct msix_entry), + GFP_KERNEL); + if (adapter->irq_ctxt.msix_entries == NULL) { + ret = -ENOMEM; + LOG_ERROR_BDF("msi-x irq entry num:%u per size:%lu kcalloc fail." 
+ "(err:%d)\n", + total, sizeof(struct msix_entry), ret); + goto l_out; + } + + for (i = 0; i < total; i++) { + adapter->irq_ctxt.msix_entries[i].entry = i; + } + + ret = pci_enable_msix_range(adapter->pdev, + adapter->irq_ctxt.msix_entries, + SXE_MSIX_IRQ_MIN_NUM, total); + if (ret < 0) { + adapter->cap &= ~SXE_MSIX_ENABLED; + SXE_KFREE(adapter->irq_ctxt.msix_entries); + + LOG_DEV_ERR("min:%u max:%u pci enable msi-x failed.(err:%d)\n", + SXE_MSIX_IRQ_MIN_NUM, total, ret); + } else { + adapter->cap |= SXE_MSIX_ENABLED; + if (ret != total) { + adapter->irq_ctxt.total_irq_num = ret; + adapter->irq_ctxt.ring_irq_num = ret - + SXE_EVENT_IRQ_NUM; + } + LOG_WARN_BDF("enable %d pci msix entry.min:%u max:%u" + " total irq num:%u ring irq num:%u cap:0x%x\n", + ret, SXE_MSIX_IRQ_MIN_NUM, total, + adapter->irq_ctxt.total_irq_num, + adapter->irq_ctxt.ring_irq_num, + adapter->cap); + + ret = 0; + } + +l_out: + return ret; +} + +static inline void sxe_irq_non_msix_configure(struct sxe_adapter *adapter) +{ + sxe_disable_dcb(adapter); + sxe_disable_sriov(adapter); + sxe_disable_rss(adapter); + sxe_ring_num_set(adapter); + sxe_irq_num_reinit(adapter); + + return; +} + +static void sxe_irq_mode_init(struct sxe_adapter *adapter) +{ + s32 ret; + + if (sxe_is_irq_intx_mode()) { + adapter->cap &= ~SXE_MSI_ENABLED; + sxe_irq_non_msix_configure(adapter); + LOG_INFO("the intx ctrl param enable\n"); + goto l_end; + } + + if (sxe_is_irq_msi_mode()) { + LOG_INFO("the msi ctrl param enable\n"); + goto l_msi; + } + + ret = sxe_msix_irq_init(adapter); + if (!ret) { + goto l_end; + + } else { + LOG_WARN_BDF("msix-x irq init fail (err:%d), try msi irq.\n", ret); + } + +l_msi: + sxe_irq_non_msix_configure(adapter); + + ret = sxe_msi_irq_init(adapter); + if (ret) { + LOG_ERROR_BDF("msi irq init fail.(err:%d) " + "use legacy(intx) irq cap:0x%x\n", + ret, adapter->cap); + } + +l_end: + return; +} + +static void sxe_irq_data_free(struct sxe_adapter *adapter, + u16 irq_idx) +{ + u16 idx; + struct sxe_irq_data *irq_data = adapter->irq_ctxt.irq_data[irq_idx]; + struct sxe_ring *ring; + + sxe_for_each_ring(ring, irq_data->tx.list) { + adapter->tx_ring_ctxt.ring[ring->idx] = NULL; + } + + if (irq_data->tx.xdp_ring) { + idx = irq_data->tx.xdp_ring->idx; + adapter->xdp_ring_ctxt.ring[idx] = NULL; + } + + sxe_for_each_ring(ring, irq_data->rx.list) { + adapter->rx_ring_ctxt.ring[ring->idx] = NULL; + } + + adapter->irq_ctxt.irq_data[irq_idx] = NULL; + netif_napi_del(&irq_data->napi); + +#ifdef HAVE_XDP_SUPPORT + if (static_key_enabled(&sxe_xdp_tx_lock_key)) { + static_branch_dec(&sxe_xdp_tx_lock_key); + } +#endif + + kfree_rcu(irq_data, rcu); + + return; +} + +static void sxe_all_irq_data_free(struct sxe_adapter *adapter) +{ + u16 irq_idx = adapter->irq_ctxt.ring_irq_num; + + while (irq_idx--) { + sxe_irq_data_free(adapter, irq_idx); + } + + return; +} + +static int sxe_irq_data_alloc(struct sxe_adapter *adapter, + u16 total_count, u16 irq_idx, bool is_bind) +{ + int ret = 0; + s32 node = dev_to_node(&adapter->pdev->dev); + s32 cpu = -1; + struct sxe_irq_data *irq_data; + + if (is_bind) { + cpu = cpumask_local_spread(irq_idx, node); + node = cpu_to_node(cpu); + } + + irq_data = kzalloc_node(struct_size(irq_data, ring, total_count), + GFP_KERNEL, node); + if (irq_data == NULL) { + LOG_ERROR_BDF("alloc interrupt data and ring resource in node:%d " + "failed, try remote. 
size: %zu irq_idx:%u " + "ring count:%u.(err:%d)\n", + node, struct_size(irq_data, ring, total_count), + irq_idx, total_count, ret); + + irq_data = kzalloc(struct_size(irq_data, ring, total_count), + GFP_KERNEL); + if (irq_data == NULL) { + ret = -ENOMEM; + LOG_ERROR_BDF("alloc interrupt data and ring resource " + "failed again. size: %zu irq_idx:%u " + "ring count:%u.(err:%d)\n", + struct_size(irq_data, ring, total_count), + irq_idx, total_count, ret); + goto l_out; + } + } + + if (cpu != -1) { + cpumask_set_cpu(cpu, &irq_data->affinity_mask); + } + + irq_data->numa_node = node; + +#ifdef SXE_TPH_CONFIGURE + irq_data->cpu = -1; +#endif + + netif_napi_add(adapter->netdev, &irq_data->napi, sxe_poll, + SXE_NAPI_WEIGHT); + + adapter->irq_ctxt.irq_data[irq_idx] = irq_data; + irq_data->adapter = adapter; + irq_data->irq_idx = irq_idx; + + irq_data->tx.work_limit = SXE_TX_WORK_LIMIT; + + LOG_INFO_BDF("irq_idx:%u ring_cnt:%u is_bind:%d bind to cpu:%d node:%d. " + "tx work_limit:%d \n", + irq_idx, total_count, + is_bind, cpu, node, + irq_data->tx.work_limit); + +l_out: + return ret; +} + +void sxe_napi_disable(struct sxe_adapter *adapter) +{ + u32 i; + + for (i = 0; i < adapter->irq_ctxt.ring_irq_num; i++) { + napi_disable(&adapter->irq_ctxt.irq_data[i]->napi); + } + + return ; +} + +static void sxe_napi_enable_all(struct sxe_adapter * adapter) +{ + u16 irq_idx; + + for (irq_idx = 0; irq_idx < adapter->irq_ctxt.ring_irq_num; irq_idx++) { + napi_enable(&(adapter->irq_ctxt.irq_data[irq_idx]->napi)); + } + + return; +} + +static void sxe_irq_interval_init(struct sxe_irq_context *irq_ctxt, + u16 irq_idx, u16 txr_cnt, u16 rxr_cnt) +{ + struct sxe_irq_data *irq_data = irq_ctxt->irq_data[irq_idx]; + + if (txr_cnt && !rxr_cnt) { + if (irq_ctxt->tx_irq_interval == 1) { + irq_data->irq_interval = SXE_IRQ_ITR_12K; + } else { + irq_data->irq_interval = irq_ctxt->tx_irq_interval; + } + } else { + if (irq_ctxt->rx_irq_interval == 1) { + irq_data->irq_interval = SXE_IRQ_ITR_20K; + } else { + irq_data->irq_interval = irq_ctxt->rx_irq_interval; + } + } + + irq_data->tx.irq_rate.irq_interval = SXE_IRQ_ITR_MAX + | SXE_IRQ_ITR_LATENCY; + irq_data->rx.irq_rate.irq_interval = SXE_IRQ_ITR_MAX + | SXE_IRQ_ITR_LATENCY; + + LOG_INFO("irq_idx:%u irq level interval:%u " + "list level rx irq interval:%u tx irq interval:%u\n", + irq_idx, irq_data->irq_interval, + irq_data->rx.irq_rate.irq_interval, + irq_data->tx.irq_rate.irq_interval); + return; +} + +#ifdef HAVE_AF_XDP_ZERO_COPY +static void sxe_set_ring_idx(struct sxe_adapter *adapter) +{ + u16 i; + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + if (adapter->rx_ring_ctxt.ring[i]) { + adapter->rx_ring_ctxt.ring[i]->ring_idx = i; + } + } + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + if (adapter->tx_ring_ctxt.ring[i]) { + adapter->tx_ring_ctxt.ring[i]->ring_idx = i; + } + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + if (adapter->xdp_ring_ctxt.ring[i]) { + adapter->xdp_ring_ctxt.ring[i]->ring_idx = i; + } + } + + return; +} +#endif + +static s32 sxe_irq_ring_bind(struct sxe_adapter *adapter) +{ + s32 ret = 0; + u16 rxr_idx = 0; + u16 txr_idx = 0; + u16 xdp_idx = 0; + u16 irq_idx = 0; + u16 irq_num = adapter->irq_ctxt.ring_irq_num; + u16 rxr_remain = adapter->rx_ring_ctxt.num; + u16 txr_remain = adapter->tx_ring_ctxt.num; + u16 xdp_remain = adapter->xdp_ring_ctxt.num; + u16 total_ring = rxr_remain + txr_remain + xdp_remain; + bool is_bind = sxe_is_irq_bind_cpu(adapter); + + if (irq_num >= total_ring) { + for (; rxr_remain > 0; irq_idx++) { + ret = 
sxe_irq_data_alloc(adapter, 1, irq_idx, is_bind); + if (ret) { + LOG_ERROR_BDF("irq_num:%u rxr_remain:%u " + "txr_remain:%u xdp_remain:%u " + "irq_idx:%u alloc rx irq " + "resource priority fail.(err:%d)\n", + irq_num, rxr_remain, + txr_remain, xdp_remain, + irq_idx, ret); + goto l_error; + } + + sxe_irq_interval_init(&adapter->irq_ctxt, irq_idx, 0, 1); + + sxe_rx_ring_init(adapter, 0, 1, rxr_idx, irq_idx); + + rxr_remain--; + rxr_idx++; + } + LOG_INFO("alloc rx irq resource priority done.irq_idx:%u " + "rxr_idx:%u txr_remain:%u rxr_remain:%u xdp_remain:%u" + " ring_irq_num:%u total_ring:%u \n", + irq_idx, rxr_idx, txr_remain, rxr_remain, + xdp_remain, irq_num, total_ring); + } + + for (; irq_idx < irq_num; irq_idx++) { + u16 txr_cnt = DIV_ROUND_UP(txr_remain, irq_num - irq_idx); + + u16 xdp_cnt = DIV_ROUND_UP(xdp_remain, irq_num - irq_idx); + + u16 rxr_cnt = DIV_ROUND_UP(rxr_remain, irq_num - irq_idx); + + total_ring = txr_cnt + xdp_cnt + rxr_cnt; + + ret = sxe_irq_data_alloc(adapter, total_ring, irq_idx, is_bind); + if (ret) { + LOG_ERROR_BDF("irq_num:%u rxr_remain:%u txr_remain:%u " + "xdp_remain:%u rxr_cnt:%u txr_cnt:%u " + " xdp_cnt:%u ird_idx:%u alloc irq resource " + " fail.(err:%d)\n", + irq_num, rxr_remain, txr_remain, + xdp_remain, rxr_cnt, txr_cnt, + xdp_cnt, irq_idx, ret); + goto l_error; + } + + sxe_irq_interval_init(&adapter->irq_ctxt, irq_idx, txr_cnt, + rxr_cnt); + + sxe_tx_ring_init(adapter, 0, txr_cnt, txr_idx, irq_idx); + + sxe_xdp_ring_init(adapter, txr_cnt, xdp_cnt, xdp_idx, irq_idx); + + sxe_rx_ring_init(adapter, txr_cnt + xdp_cnt, rxr_cnt, + rxr_idx, irq_idx); + + txr_remain -= txr_cnt; + xdp_remain -= xdp_cnt; + rxr_remain -= rxr_cnt; + + txr_idx++; + xdp_idx += xdp_cnt; + rxr_idx++; + } + +#ifdef HAVE_AF_XDP_ZERO_COPY + sxe_set_ring_idx(adapter); +#endif + + return ret; + +l_error: + adapter->irq_ctxt.ring_irq_num = 0; + adapter->tx_ring_ctxt.num = 0; + adapter->rx_ring_ctxt.num = 0; + adapter->xdp_ring_ctxt.num = 0; + + while(irq_idx--) { + sxe_irq_data_free(adapter, irq_idx); + } + + return ret; + +} + +static void sxe_pci_irq_disable(struct sxe_adapter *adapter) +{ + if (adapter->cap & SXE_MSIX_ENABLED) { + adapter->cap &= ~SXE_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + SXE_KFREE(adapter->irq_ctxt.msix_entries); + } else if (adapter->cap & SXE_MSI_ENABLED) { + adapter->cap &= ~SXE_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } + + return; +} + +void sxe_hw_irq_disable(struct sxe_adapter *adapter) +{ + u16 i; + struct sxe_hw *hw = &adapter->hw; + struct sxe_irq_context *irq = &adapter->irq_ctxt; + + hw->irq.ops->all_irq_disable(hw); + + if (adapter->cap & SXE_MSIX_ENABLED) { + for (i = 0; i < adapter->irq_ctxt.ring_irq_num; i++) { + synchronize_irq(irq->msix_entries[i].vector); + } + synchronize_irq(irq->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } + + return; +} + +void sxe_irq_release(struct sxe_adapter *adapter) +{ + u16 irq_idx; + struct sxe_irq_context *irq_ctxt = &adapter->irq_ctxt; + + if (!irq_ctxt->ring_irq_num) { + goto l_out; + } + + if (!(adapter->cap & SXE_MSIX_ENABLED)) { + free_irq(adapter->pdev->irq, adapter); + goto l_out; + } + + if (!irq_ctxt->msix_entries) { + goto l_out; + } + + for (irq_idx = 0; irq_idx < irq_ctxt->ring_irq_num; irq_idx++) { + struct sxe_irq_data *irq_data = irq_ctxt->irq_data[irq_idx]; + struct msix_entry *entry = &irq_ctxt->msix_entries[irq_idx]; + + if (!irq_data->rx.list.next && + !irq_data->tx.list.next && + !irq_data->tx.xdp_ring) { + continue; + } + + 
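+		/* Clear the CPU affinity hint before freeing this ring vector. */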
irq_set_affinity_hint(entry->vector, NULL); + + free_irq(entry->vector, irq_data); + } + + free_irq(irq_ctxt->msix_entries[irq_idx].vector, adapter); + +l_out: + LOG_INFO_BDF("adapter cap:0x%x ring_irq_num:%u irq unregister done.", + adapter->cap, irq_ctxt->ring_irq_num); + return; +} + +s32 sxe_irq_ctxt_init(struct sxe_adapter *adapter) +{ + s32 ret; + + sxe_irq_num_init(adapter); + + sxe_irq_mode_init(adapter); + + ret = sxe_irq_ring_bind(adapter); + if (ret) { + LOG_DEV_ERR("interrupt and ring bind fail.(err:%d)\n", ret); + goto l_disable_irq; + } + + return ret; + +l_disable_irq: + sxe_pci_irq_disable(adapter); + return ret; + +} + +void sxe_irq_ctxt_exit(struct sxe_adapter *adapter) +{ + + sxe_all_irq_data_free(adapter); + + sxe_pci_irq_disable(adapter); + + adapter->irq_ctxt.ring_irq_num = 0; + adapter->tx_ring_ctxt.num = 0; + adapter->rx_ring_ctxt.num = 0; + adapter->xdp_ring_ctxt.num = 0; + + return; +} + +static bool sxe_set_irq_name(struct sxe_irq_data *irq_data, + char *dev_name, + u16 *rx_idx, u16 *tx_idx) +{ + if (irq_data->tx.list.next && + irq_data->rx.list.next) { + snprintf(irq_data->name, sizeof(irq_data->name), + "%s-TxRx-%u", dev_name, (*rx_idx)++); + (*tx_idx)++; + } else if (irq_data->rx.list.next) { + snprintf(irq_data->name, sizeof(irq_data->name), + "%s-Rx-%u", dev_name, (*rx_idx)++); + } else if (irq_data->tx.list.next || + irq_data->tx.xdp_ring) { + snprintf(irq_data->name, sizeof(irq_data->name), + "%s-Tx-%u", dev_name, (*tx_idx)++); + } else { + LOG_INFO("%u irq has no ring bind.\n", irq_data->irq_idx); + return false; + } + + return true; +} + +STATIC irqreturn_t sxe_msix_ring_irq_handler(int irq, void *data) +{ + struct sxe_irq_data *irq_data = data; + + if (irq_data->tx.list.next || + irq_data->rx.list.next || + irq_data->tx.xdp_ring) { + napi_schedule_irqoff(&irq_data->napi); + } + + return IRQ_HANDLED ; + +} + +void sxe_lsc_irq_handler(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + + adapter->stats.sw.link_state_change_cnt++; + set_bit(SXE_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state); + LOG_DEBUG("lsc irq: trigger link_check subtask\n"); + + adapter->link.check_timeout = jiffies; + if (!test_bit(SXE_DOWN, &adapter->state)) { + hw->irq.ops->specific_irq_disable(hw, SXE_EIMC_LSC); + sxe_monitor_work_schedule(adapter); + LOG_DEBUG_BDF("lsc: monitor schedule\n"); + } + + return; +} + +void sxe_mailbox_irq_handler(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + unsigned long flags; + u8 vf_idx; + + LOG_DEBUG_BDF("rcv mailbox irq.num_vfs:%u.\n", adapter->vt_ctxt.num_vfs); + + spin_lock_irqsave(&adapter->vt_ctxt.vfs_lock, flags); + for (vf_idx = 0; vf_idx < adapter->vt_ctxt.num_vfs; vf_idx++) { + + if (hw->mbx.ops->rst_check(hw, vf_idx)) { + LOG_INFO("vf_idx:%d flr triggered.\n", vf_idx); + sxe_vf_hw_rst(adapter, vf_idx); + } + + if (hw->mbx.ops->req_check(hw, vf_idx)) { + sxe_vf_req_task_handle(adapter, vf_idx); + } + + if (hw->mbx.ops->ack_check(hw, vf_idx)) { + sxe_vf_ack_task_handle(adapter, vf_idx); + } + } + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); + + return; +} + +STATIC void sxe_fnav_irq_handler(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + u32 reinit_count = 0; + u32 i; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + struct sxe_ring *ring = adapter->tx_ring_ctxt.ring[i]; + if (test_and_clear_bit(SXE_TX_FNAV_INIT_DONE, + &ring->state)) { + reinit_count++; + } + } + + LOG_INFO_BDF("adapter[%p] fnav reinit, count=%u\n",adapter, reinit_count); + + if 
(reinit_count) { + hw->irq.ops->specific_irq_disable(hw, SXE_EIMC_FLOW_NAV); + set_bit(SXE_FNAV_REQUIRES_REINIT, &adapter->monitor_ctxt.state); + sxe_monitor_work_schedule(adapter); + } + + return; +} + +static void sxe_sfp_irq_handler(struct sxe_adapter *adapter, u32 eicr) +{ + struct sxe_hw *hw = &adapter->hw; + + if (!sxe_is_sfp(adapter)) { + return; + } + + if (eicr & SXE_EICR_GPI_SPP2) { + hw->irq.ops->pending_irq_write_clear(hw, SXE_EICR_GPI_SPP2); + if (!test_bit(SXE_DOWN, &adapter->state)) { + set_bit(SXE_SFP_NEED_RESET, &adapter->monitor_ctxt.state); + adapter->link.sfp_reset_timeout = 0; + LOG_DEV_WARN("sfp is inserted into slot, " + "trigger sfp_reset subtask\n"); + sxe_monitor_work_schedule(adapter); + } + } + + if (eicr & SXE_EICR_GPI_SPP1) { + hw->irq.ops->pending_irq_write_clear(hw, SXE_EICR_GPI_SPP1); + if (!test_bit(SXE_DOWN, &adapter->state) && + !test_bit(SXE_SFP_MULTI_SPEED_SETTING, &adapter->state)) { + if (time_after(jiffies, adapter->link.last_lkcfg_time + +#ifdef SXE_SFP_DEBUG + (HZ * sw_sfp_los_delay_ms) / SXE_HZ_TRANSTO_MS) +#else + (HZ * SXE_SW_SFP_LOS_DELAY_MS) / SXE_HZ_TRANSTO_MS) +#endif + ) { + adapter->link.last_lkcfg_time = jiffies; + set_bit(SXE_LINK_NEED_CONFIG, &adapter->monitor_ctxt.state); + LOG_MSG_INFO(hw, "sfp optical signal level below standard, " + "trigger link_config subtask\n"); + sxe_monitor_work_schedule(adapter); + } + } + } +} + +static void sxe_event_irq_common_handler(struct sxe_adapter *adapter, u32 eicr) +{ + if (eicr & SXE_EICR_HDC) { + sxe_hdc_irq_handler(adapter); + } + + if (eicr & SXE_EICR_LSC) { + sxe_lsc_irq_handler(adapter); + } + + if (eicr & SXE_EICR_MAILBOX) + sxe_mailbox_irq_handler(adapter); + + if (eicr & SXE_EICR_ECC) { + LOG_MSG_WARN(link, "ecc interrupt triggered eicr:0x%x.\n", + eicr); + } + + if (eicr & SXE_EICR_FLOW_NAV) { + sxe_fnav_irq_handler(adapter); + } + + if ((eicr & SXE_EICR_GPI_SPP1) || (eicr & SXE_EICR_GPI_SPP2)) { + sxe_sfp_irq_handler(adapter, eicr); + } + + return; +} + +STATIC irqreturn_t sxe_msix_event_irq_handler(int irq, void *data) +{ + struct sxe_adapter *adapter = data; + struct sxe_hw *hw = &adapter->hw; + unsigned long flags; + u32 eicr; + + spin_lock_irqsave(&adapter->irq_ctxt.event_irq_lock, flags); + + hw->irq.ops->specific_irq_disable(hw, 0xFFFF0000); + mb(); + + eicr = hw->irq.ops->irq_cause_get(hw); + + eicr &= 0xFFFF0000; + + hw->irq.ops->pending_irq_write_clear(hw, eicr); + + spin_unlock_irqrestore(&adapter->irq_ctxt.event_irq_lock, flags); + + sxe_event_irq_common_handler(adapter, eicr); + + if (!test_bit(SXE_DOWN, &adapter->state)) { + sxe_enable_irq(adapter, false, false); + } + + LOG_INFO("rcv event irq:%d eicr:0x%x.\n", irq, eicr); + + return IRQ_HANDLED; +} + +STATIC irqreturn_t sxe_non_msix_irq_handler(int irq, void *data) +{ + struct sxe_adapter *adapter = data; + struct sxe_hw *hw = &adapter->hw; + struct sxe_irq_data *irq_data = adapter->irq_ctxt.irq_data[0]; + u32 eicr; + + hw->irq.ops->specific_irq_disable(hw, SXE_IRQ_CLEAR_MASK); + + eicr = hw->irq.ops->pending_irq_read_clear(hw); + if (!eicr) { + if (!test_bit(SXE_DOWN, &adapter->state)) { + sxe_enable_irq(adapter, true, true); + } + return IRQ_NONE; + } + + sxe_event_irq_common_handler(adapter, eicr); + + napi_schedule_irqoff(&irq_data->napi); + + if (!test_bit(SXE_DOWN, &adapter->state)) { + sxe_enable_irq(adapter, false, false); + } + + return IRQ_HANDLED; +} + +static int sxe_msix_request_irqs(struct sxe_adapter *adapter) +{ + int ret; + u16 rx_idx = 0; + u16 tx_idx = 0; + u16 irq_idx; + struct sxe_irq_data *irq_data; 
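+	/* Per-ring vectors are requested in the loop below; the final MSI-X entry is reserved for event (link/mailbox/HDC) interrupts. */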
+ struct msix_entry *entry; + struct net_device *netdev = adapter->netdev; + struct sxe_irq_context *irq_ctxt = &adapter->irq_ctxt; + + for (irq_idx = 0; irq_idx < irq_ctxt->ring_irq_num; irq_idx++) { + irq_data = irq_ctxt->irq_data[irq_idx]; + entry = &irq_ctxt->msix_entries[irq_idx]; + + if (!(sxe_set_irq_name(irq_data, netdev->name, + &rx_idx, &tx_idx))) { + continue; + } + + ret = request_irq(entry->vector, &sxe_msix_ring_irq_handler, 0, + irq_data->name, irq_data); + if (ret) { + LOG_MSG_ERR(probe, "irq_idx:%u rx_idx:%u tx_idx:%u irq_num:%u " + "vector:%u msi-x ring interrupt " + "reuqest fail.(err:%d)\n", + irq_idx, rx_idx, tx_idx, + irq_ctxt->ring_irq_num, + entry->vector, ret); + goto l_free_irq; + } + + if (adapter->cap & SXE_FNAV_SAMPLE_ENABLE) { + irq_set_affinity_hint(entry->vector, + &irq_data->affinity_mask); + } + } + + ret = request_irq(irq_ctxt->msix_entries[irq_idx].vector, + sxe_msix_event_irq_handler, 0, netdev->name, adapter); + if (ret) { + LOG_MSG_ERR(probe, "irq_idx:%u vector:%u msi-x event interrupt " + "reuqest fail.(err:%d)\n", + irq_idx, + irq_ctxt->msix_entries[irq_idx].vector, ret); + goto l_free_irq; + } + + return ret; + +l_free_irq: + while (irq_idx) { + irq_idx--; + irq_set_affinity_hint(adapter->irq_ctxt.msix_entries[irq_idx].vector, NULL); + free_irq(irq_ctxt->msix_entries[irq_idx].vector, + irq_ctxt->irq_data[irq_idx]); + } + + + adapter->cap &= ~SXE_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + + SXE_KFREE(adapter->irq_ctxt.msix_entries); + + return ret; +} + +STATIC int sxe_request_irq(struct sxe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int ret; + + if (adapter->cap & SXE_MSIX_ENABLED) { + LOG_INFO("request msi-x interrupt.\n"); + ret = sxe_msix_request_irqs(adapter); + } else if (adapter->cap & SXE_MSI_ENABLED) { + LOG_INFO("request msi interrupt.\n"); + ret = request_irq(adapter->pdev->irq, sxe_non_msix_irq_handler, + 0, netdev->name, adapter); + } else { + LOG_INFO("request legacy interrupt.\n"); + ret = request_irq(adapter->pdev->irq, sxe_non_msix_irq_handler, + IRQF_SHARED, netdev->name, adapter); + } + + return ret; +} + +static void sxe_configure_msix_hw(struct sxe_adapter *adapter) +{ + u16 irq_idx; + u32 value; + struct sxe_hw *hw = &adapter->hw; + struct sxe_ring *ring; + struct sxe_irq_context *irq_ctxt = &adapter->irq_ctxt; + + if (adapter->vt_ctxt.num_vfs > 32) { + u32 sel_value = BIT(adapter->vt_ctxt.num_vfs - 32) - 1; + hw->irq.ops->set_eitrsel(hw, sel_value); + } + + for (irq_idx = 0; irq_idx < irq_ctxt->ring_irq_num; irq_idx++) { + struct sxe_irq_data *irq_data = irq_ctxt->irq_data[irq_idx]; + + sxe_for_each_ring(ring, irq_data->rx.list) { + + hw->irq.ops->ring_irq_map(hw, false, ring->reg_idx, + irq_idx); + } + + sxe_for_each_ring(ring, irq_data->tx.list) { + hw->irq.ops->ring_irq_map(hw, true, ring->reg_idx, + irq_idx); + } + + if (irq_data->tx.xdp_ring) { + hw->irq.ops->ring_irq_map(hw, true, + irq_data->tx.xdp_ring->reg_idx, + irq_idx); + } + + hw->irq.ops->ring_irq_interval_set(hw, irq_idx, irq_data->irq_interval); + } + + hw->irq.ops->event_irq_map(hw, 1, irq_idx); + + hw->irq.ops->event_irq_interval_set(hw, irq_idx, 1950); + + + value = SXE_EIMS_ENABLE_MASK; + value &= ~(SXE_EIMS_OTHER | SXE_EIMS_MAILBOX | SXE_EIMS_LSC); + hw->irq.ops->event_irq_auto_clear_set(hw, value); + + return; +} + +static void sxe_configure_non_msix_hw(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + struct sxe_irq_data *irq_data = adapter->irq_ctxt.irq_data[0]; + + 
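+	/* Non-MSI-X mode: a single vector services everything, so map RX and TX ring 0 to entry 0. */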
hw->irq.ops->ring_irq_interval_set(hw, 0, irq_data->irq_interval); + + hw->irq.ops->ring_irq_map(hw, false, 0, 0); + hw->irq.ops->ring_irq_map(hw, true, 0, 0); + + LOG_MSG_INFO(hw, "non msix interrupt ivar setup done.\n"); + + return; +} + +static void sxe_irq_general_configure(struct sxe_adapter *adapter) +{ + u32 gpie = 0; + struct sxe_hw *hw = &adapter->hw; + u32 pool_mask = sxe_pool_mask_get(adapter); + + if (adapter->cap & SXE_MSIX_ENABLED) { + gpie = SXE_GPIE_MSIX_MODE | SXE_GPIE_PBA_SUPPORT | + SXE_GPIE_OCD | SXE_GPIE_EIAME; + + hw->irq.ops->ring_irq_auto_disable(hw, true); + } else { + hw->irq.ops->ring_irq_auto_disable(hw, false); + } + + if (adapter->cap & SXE_SRIOV_ENABLE) { + gpie &= ~SXE_GPIE_VTMODE_MASK; + switch (pool_mask) { + case SXE_8Q_PER_POOL_MASK: + gpie |= SXE_GPIE_VTMODE_16; + break; + case SXE_4Q_PER_POOL_MASK: + gpie |= SXE_GPIE_VTMODE_32; + break; + default: + gpie |= SXE_GPIE_VTMODE_64; + break; + } + } + + gpie |= SXE_GPIE_SPP1_EN | SXE_GPIE_SPP2_EN; + + hw->irq.ops->irq_general_reg_set(hw, gpie); + + return; +} + +void sxe_hw_irq_configure(struct sxe_adapter *adapter) +{ + s32 ret = 0; + struct sxe_hw *hw = &adapter->hw; + + hw->irq.ops->spp_configure(hw, +#ifdef SXE_SFP_DEBUG + hw_spp_proc_delay_us); +#else + SXE_SPP_PROC_DELAY_US); +#endif + + sxe_irq_general_configure(adapter); + + if (adapter->cap & SXE_MSIX_ENABLED) { + sxe_configure_msix_hw(adapter); + } else { + sxe_configure_non_msix_hw(adapter); + } + + if (adapter->phy_ctxt.ops->sfp_tx_laser_enable) { + adapter->phy_ctxt.ops->sfp_tx_laser_enable(adapter); + } + + smp_mb__before_atomic(); + clear_bit(SXE_DOWN, &adapter->state); + + sxe_napi_enable_all(adapter); + + if (sxe_is_sfp(adapter)) { + sxe_sfp_reset_task_submit(adapter); + } else { + ret = sxe_link_config(adapter); + if(ret) { + LOG_MSG_ERR(probe, "sxe_link_config failed %d\n", ret); + } + } + + hw->irq.ops->pending_irq_read_clear(hw); + + sxe_enable_irq(adapter, true, true); + + return; +} + +int sxe_irq_configure(struct sxe_adapter *adapter) +{ + int ret; + + ret = sxe_request_irq(adapter); + if (ret) { + LOG_MSG_ERR(probe, "interrupt mode:0x%x request irq failed, (err:%d)\n", + adapter->cap, ret); + goto l_out; + } + + sxe_hw_irq_configure(adapter); + +l_out: + return ret; +} + +static bool sxe_lro_status_update(struct sxe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 itr = adapter->irq_ctxt.rx_irq_interval; + bool ret = false; + + if (!(adapter->cap & SXE_LRO_CAPABLE) || + !(netdev->features & NETIF_F_LRO)) { + LOG_INFO("lro disable status, no need judge itr interval.\n"); + goto l_out; + } + + if ((itr == 1) || (itr > SXE_IRQ_LRO_ITR_MIN)) { + if (!(adapter->cap & SXE_LRO_ENABLE)) { + adapter->cap |= SXE_LRO_ENABLE; + ret = true; + LOG_MSG_INFO(probe, "user itr:%u large than lro delay:%u, " + "enable lro.\n", itr, SXE_IRQ_LRO_ITR_MIN); + } + } else if (adapter->cap & SXE_LRO_ENABLE) { + adapter->cap &= ~SXE_LRO_ENABLE; + ret = true; + LOG_MSG_INFO(probe, "user itr:%u less than lro delay:%u, disable lro.\n", + itr, SXE_IRQ_LRO_ITR_MIN); + } + +l_out: + return ret; +} + +s32 sxe_irq_coalesce_get(struct net_device * netdev, + struct ethtool_coalesce *user) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + u16 rx_itr = adapter->irq_ctxt.rx_irq_interval; + u16 tx_itr = adapter->irq_ctxt.tx_irq_interval; + struct sxe_irq_data *irq_data = adapter->irq_ctxt.irq_data[0]; + bool is_mixed; + s32 ret = 0; + + + if (irq_data->tx.list.cnt && irq_data->rx.list.cnt) { + is_mixed = true; + } else { + is_mixed = false; + 
} + + if (rx_itr == SXE_IRQ_ITR_CONSTANT_MODE_VALUE) { + user->rx_coalesce_usecs = SXE_IRQ_ITR_CONSTANT_MODE_VALUE; + } else { + user->rx_coalesce_usecs = rx_itr >> SXE_EITR_ITR_SHIFT; + } + + if (is_mixed) { + LOG_INFO("interrupt 0 has both rx and tx ring, " + "just report rx itr:%u.\n", + user->rx_coalesce_usecs); + goto l_out; + } + + if (tx_itr == SXE_IRQ_ITR_CONSTANT_MODE_VALUE) { + user->tx_coalesce_usecs = SXE_IRQ_ITR_CONSTANT_MODE_VALUE; + } else { + user->tx_coalesce_usecs = tx_itr >> SXE_EITR_ITR_SHIFT; + } + + LOG_INFO("rx irq interval:%u tx irq interval:%u.\n", + rx_itr, tx_itr); + +l_out: + return ret; +} + +s32 sxe_irq_coalesce_set(struct net_device *netdev, struct ethtool_coalesce *user) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + struct sxe_irq_data *irq_data = adapter->irq_ctxt.irq_data[0]; + u16 tx_itr; + u16 rx_itr; + u16 tx_itr_old; + u8 i; + bool is_mixed; + bool need_rst = false; + u32 itr_max = SXE_EITR_ITR_MAX; + s32 ret = 0; + + if ((user->rx_coalesce_usecs > itr_max) || + (user->tx_coalesce_usecs > itr_max)) { + ret = -EINVAL; + LOG_ERROR_BDF("user param invalid, rx_coalesce_usecs:%u" + "tx_coalesce_usecs:%u max:%u.(err:%d)\n", + user->rx_coalesce_usecs, + user->tx_coalesce_usecs, + itr_max, ret); + goto l_out; + } + + if (irq_data->tx.list.cnt && irq_data->rx.list.cnt) { + is_mixed = true; + } else { + is_mixed = false; + } + + if (is_mixed) { + if (user->tx_coalesce_usecs) { + ret = -EINVAL; + LOG_ERROR_BDF("irq_idx:0 bind tx ring cnt:%u rx ring cnt:%u" + "tx_coalesce_usecs:%u rx_coalesce_usecs:%u." + "(err:%d)\n ", + irq_data->tx.list.cnt, + irq_data->rx.list.cnt, + user->tx_coalesce_usecs, + user->rx_coalesce_usecs, + ret); + goto l_out; + } + tx_itr_old = adapter->irq_ctxt.rx_irq_interval; + } else { + tx_itr_old = adapter->irq_ctxt.tx_irq_interval; + } + + if (user->rx_coalesce_usecs == SXE_IRQ_ITR_CONSTANT_MODE_VALUE) { + adapter->irq_ctxt.rx_irq_interval = SXE_IRQ_ITR_CONSTANT_MODE_VALUE; + rx_itr = SXE_IRQ_ITR_20K; + } else { + adapter->irq_ctxt.rx_irq_interval = user->rx_coalesce_usecs << SXE_EITR_ITR_SHIFT; + rx_itr = adapter->irq_ctxt.rx_irq_interval; + } + + if (user->tx_coalesce_usecs == SXE_IRQ_ITR_CONSTANT_MODE_VALUE) { + adapter->irq_ctxt.tx_irq_interval = SXE_IRQ_ITR_CONSTANT_MODE_VALUE; + tx_itr = SXE_IRQ_ITR_12K; + } else { + adapter->irq_ctxt.tx_irq_interval = user->tx_coalesce_usecs << SXE_EITR_ITR_SHIFT; + tx_itr = adapter->irq_ctxt.tx_irq_interval; + } + + if (is_mixed) { + adapter->irq_ctxt.tx_irq_interval = adapter->irq_ctxt.rx_irq_interval; + } + + if (!!adapter->irq_ctxt.tx_irq_interval != !!tx_itr_old) { + need_rst = true; + } + + need_rst |= sxe_lro_status_update(adapter); + + for (i = 0; i < adapter->irq_ctxt.ring_irq_num; i++) { + irq_data = adapter->irq_ctxt.irq_data[i]; + if (irq_data->tx.list.cnt && !irq_data->rx.list.cnt) { + irq_data->irq_interval = tx_itr; + } else { + irq_data->irq_interval = rx_itr; + } + + hw->irq.ops->ring_irq_interval_set(hw, i, irq_data->irq_interval); + } + + if (need_rst) { + sxe_do_reset(netdev); + } + + LOG_INFO_BDF("user tx_coalesce_usecs:%u rx_coalesce_usecs:%u " + "adapter tx_irq_interval:%u rx_irq_interval:%u " + "tx_itr:%u rx_itr:%u need_rst:%u is_misxed:%u.\n", + user->tx_coalesce_usecs, + user->rx_coalesce_usecs, + adapter->irq_ctxt.tx_irq_interval, + adapter->irq_ctxt.rx_irq_interval, + tx_itr, + rx_itr, + need_rst, + is_mixed); + +l_out: + return ret; +} + +static void sxe_packet_size_cal(u32 *size) +{ + if ((*size) <= 60) { + (*size) = 5120; 
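+		/* Tiny frames get a fixed weight; larger averages are scaled piecewise below to feed the ITR estimate. */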
+ } else if ((*size) <= 316) { + (*size) *= 40; + (*size) += 2820; + } else if ((*size) <= 1084) { + (*size) *= 15; + (*size) += 11452; + } else if ((*size) < 1968) { + (*size) *= 5; + (*size) += 22420; + } else { + (*size) = 32256; + } + + return; +} + +static u32 sxe_itr_rate_cal(u32 size, u32 link_speed) +{ + u32 ret = 0; + + switch (link_speed) { + case SXE_LINK_SPEED_10_FULL: + case SXE_LINK_SPEED_1GB_FULL: + size = (size > 8064) ? 8064 : size; + ret += DIV_ROUND_UP(size, SXE_IRQ_ITR_INC_MIN * 64) * + SXE_IRQ_ITR_INC_MIN; + break; + + default: + ret += DIV_ROUND_UP(size, SXE_IRQ_ITR_INC_MIN * 256) * + SXE_IRQ_ITR_INC_MIN; + break; + } + + return ret; +} + +static void sxe_irq_interval_update(struct sxe_irq_data *irq_data, + struct sxe_irq_rate *rate) +{ + u32 itr; + u32 size; + u32 packets = rate->total_packets; + u32 bytes = rate->total_bytes; + u16 old_itr = irq_data->irq_interval; + u8 old_itr_tmp = rate->irq_interval; + unsigned long cur = jiffies; + + if (time_after(cur, rate->next_update)) { + itr = SXE_IRQ_ITR_MIN | SXE_IRQ_ITR_LATENCY; + goto update; + } + + if (!packets) { + itr = (old_itr >> 2) + SXE_IRQ_ITR_INC_MIN; + itr = (itr > SXE_IRQ_ITR_MAX) ? SXE_IRQ_ITR_MAX : itr; + itr += (old_itr_tmp & SXE_IRQ_ITR_LATENCY); + goto update; + } else if ((packets < SXE_IRQ_ITR_PKT_4) && (bytes < SXE_IRQ_ITR_BYTES_9000)) { + itr = SXE_IRQ_ITR_LATENCY; + goto adjust; + } else { + if (packets < SXE_IRQ_ITR_PKT_48) { + itr = (old_itr >> 2) + SXE_IRQ_ITR_INC_MIN; + itr = (itr > SXE_IRQ_ITR_MAX) ? SXE_IRQ_ITR_MAX : itr; + } else if (packets < SXE_IRQ_ITR_PKT_96) { + itr = old_itr >> 2; + } else if (packets < SXE_IRQ_ITR_PKT_256) { + itr = old_itr >> 3; + itr = (itr < SXE_IRQ_ITR_MIN) ? SXE_IRQ_ITR_MIN : itr; + } else { + itr = SXE_IRQ_ITR_BULK; + goto adjust; + } + + goto update; + } + +adjust: + size = bytes / packets; + sxe_packet_size_cal(&size); + + if (itr & SXE_IRQ_ITR_LATENCY) { + size >>= 1; + } + + itr += sxe_itr_rate_cal(size, irq_data->adapter->link.speed); + +update: + rate->irq_interval = itr; + rate->next_update = cur + 1; + rate->total_bytes = 0; + rate->total_packets = 0; + + return; +} + +static void sxe_irq_rate_adjust(struct sxe_irq_data *irq_data) +{ + u32 irq_data_itr; + struct sxe_irq_rate *tx_rate = &irq_data->tx.irq_rate; + struct sxe_irq_rate *rx_rate = &irq_data->rx.irq_rate; + struct sxe_adapter *adapter = irq_data->adapter; + struct sxe_hw *hw = &adapter->hw; + + if (irq_data->tx.list.cnt) { + sxe_irq_interval_update(irq_data, tx_rate); + } + + if (irq_data->rx.list.cnt) { + sxe_irq_interval_update(irq_data, rx_rate); + } + + irq_data_itr = min(tx_rate->irq_interval, rx_rate->irq_interval); + irq_data_itr &= ~SXE_IRQ_ITR_LATENCY; + + irq_data_itr <<= SXE_EITR_ITR_SHIFT; + + if (irq_data_itr != irq_data->irq_interval) { + irq_data->irq_interval = irq_data_itr; + hw->irq.ops->ring_irq_interval_set(hw, irq_data->irq_idx, + irq_data->irq_interval); + } + + return; +} + +int sxe_poll(struct napi_struct *napi, int weight) +{ + struct sxe_ring *ring; + bool clean_complete = true; + struct sxe_irq_data *irq_data = container_of(napi, + struct sxe_irq_data, napi); + struct sxe_adapter *adapter = irq_data->adapter; + struct sxe_hw *hw = &adapter->hw; + s32 per_ring_budget; + u32 cleaned; + s32 total_cleaned = 0; + +#ifdef SXE_TPH_CONFIGURE + if (adapter->cap & SXE_TPH_ENABLE){ + sxe_tph_update(irq_data); + } +#endif + + sxe_for_each_ring(ring, irq_data->tx.list) { + clean_complete = + sxe_tx_ring_irq_clean(irq_data, ring, weight); + + LOG_DEBUG_BDF("tx ring[%u] 
clean_complete:%s\n", ring->idx, + clean_complete ? "true" : "false"); + } +#ifdef HAVE_AF_XDP_ZERO_COPY + ring = irq_data->tx.xdp_ring; + if (ring) { + if (ring->xsk_pool) { + LOG_DEBUG_BDF("ring[%u] has xsk_umem, clean xdp tx irq\n", + ring->idx); + clean_complete = sxe_xdp_tx_ring_irq_clean(irq_data, ring, weight); + } + } +#endif + if (weight <= 0) { + LOG_DEBUG_BDF("weight:%d\n", weight); + return weight; + } + + per_ring_budget = max(weight / irq_data->rx.list.cnt, 1); + LOG_DEBUG_BDF("rings in irq=%u, per_ring_budget=%d\n", + irq_data->rx.list.cnt, per_ring_budget); + + sxe_for_each_ring(ring, irq_data->rx.list) { +#ifdef HAVE_AF_XDP_ZERO_COPY + cleaned = ring->xsk_pool ? + sxe_zc_rx_ring_irq_clean(irq_data, ring, + per_ring_budget) : + sxe_rx_ring_irq_clean(irq_data, ring, + per_ring_budget); +#else + cleaned = sxe_rx_ring_irq_clean(irq_data, ring, per_ring_budget); +#endif + total_cleaned += cleaned; + if (cleaned >= per_ring_budget) { + clean_complete = false; + } + LOG_DEBUG_BDF("ring[%u] %s cleaned = %u, total_cleaned = %u\n", + ring->idx, +#ifdef HAVE_AF_XDP_ZERO_COPY + ring->xsk_pool ? "xdp" : "", +#else + "", +#endif + cleaned, total_cleaned); + } + + if (false == clean_complete) { + LOG_DEBUG_BDF("not cleaned, rescheduling\n"); + return weight; + } + + if (likely(napi_complete_done(napi, total_cleaned))) { + if (adapter->irq_ctxt.rx_irq_interval == SXE_IRQ_ITR_CONSTANT_MODE_VALUE) { + sxe_irq_rate_adjust(irq_data); + } + + if (!test_bit(SXE_DOWN, &adapter->state)) { + hw->irq.ops->ring_irq_enable(hw, + BIT_ULL(irq_data->irq_idx)); + } + } + + return min(total_cleaned, (weight - 1)); +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_irq.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_irq.h new file mode 100644 index 000000000000..bb2c635fd7c2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_irq.h @@ -0,0 +1,126 @@ + +#ifndef __SXE_IRQ_H__ +#define __SXE_IRQ_H__ + +#include "sxe_ring.h" + +struct sxe_adapter; +struct ethtool_coalesce; + +#define SXE_MSIX_IRQ_MAX_NUM (64) +#define SXE_EVENT_IRQ_NUM (1) +#define SXE_RING_IRQ_MIN_NUM (1) +#define SXE_RING_IRQ_MAX_NUM (SXE_MSIX_IRQ_MAX_NUM) +#define SXE_MSIX_IRQ_MIN_NUM (SXE_EVENT_IRQ_NUM + SXE_RING_IRQ_MIN_NUM) + +#define SXE_PCIE_MSIX_CAPS_OFFSET (0xB2) +#define SXE_PCIE_MSIX_ENTRY_MASK (0x7FF) +#define SXE_NAPI_WEIGHT (64) + +#define SXE_IRQ_ITR_INC_MIN (2) +#define SXE_IRQ_ITR_MIN (10) +#define SXE_IRQ_ITR_MAX (126) +#define SXE_IRQ_ITR_LATENCY (0x80) +#define SXE_IRQ_ITR_BULK (0x00) +#define SXE_IRQ_ITR_MASK (0x00000FF8) + +#define SXE_IRQ_BULK (0) +#define SXE_IRQ_ITR_12K (336) +#define SXE_IRQ_ITR_20K (200) +#define SXE_IRQ_ITR_100K (40) + +#define SXE_IRQ_LRO_ITR_MIN (24) +#define SXE_IRQ_ITR_CONSTANT_MODE_VALUE (1) + +#define SXE_IRQ_ITR_PKT_4 4 +#define SXE_IRQ_ITR_PKT_48 48 +#define SXE_IRQ_ITR_PKT_96 96 +#define SXE_IRQ_ITR_PKT_256 256 +#define SXE_IRQ_ITR_BYTES_9000 9000 + +enum sxe_irq_mode { + SXE_IRQ_MSIX_MODE = 0, + SXE_IRQ_MSI_MODE, + SXE_IRQ_INTX_MODE, +}; + +struct sxe_irq_rate { + unsigned long next_update; + unsigned int total_bytes; + unsigned int total_packets; + u16 irq_interval; +}; + +struct sxe_list { + struct sxe_ring *next; + u8 cnt; +}; + +struct sxe_tx_context { + struct sxe_list list; + struct sxe_ring *xdp_ring; + struct sxe_irq_rate irq_rate; + u16 work_limit; +}; + +struct sxe_rx_context { + struct sxe_irq_rate irq_rate; + struct sxe_list list; +}; + +struct sxe_irq_data { + struct sxe_adapter *adapter; +#ifdef SXE_TPH_CONFIGURE + s32 cpu; +#endif + u16 irq_idx; + u16 
irq_interval; + struct sxe_tx_context tx; + struct sxe_rx_context rx; + struct napi_struct napi; + cpumask_t affinity_mask; + s32 numa_node; + struct rcu_head rcu; + s8 name[IFNAMSIZ + 16]; + struct sxe_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +struct sxe_irq_context { + struct msix_entry *msix_entries; + struct sxe_irq_data *irq_data[SXE_RING_IRQ_MAX_NUM]; + spinlock_t event_irq_lock; + u16 max_irq_num; + u16 ring_irq_num; + u16 total_irq_num; + u16 rx_irq_interval; + u16 tx_irq_interval; +}; + +int sxe_irq_configure(struct sxe_adapter *adapter); + +int sxe_poll(struct napi_struct *napi, int weight); + +void sxe_napi_disable(struct sxe_adapter *adapter); + +void sxe_irq_release(struct sxe_adapter *adapter); + +void sxe_hw_irq_disable(struct sxe_adapter *adapter); + +void sxe_irq_ctxt_exit(struct sxe_adapter *adapter); + +s32 sxe_irq_ctxt_init(struct sxe_adapter *adapter); + +void sxe_hw_irq_configure(struct sxe_adapter *adapter); + +s32 sxe_config_space_irq_num_get(struct sxe_adapter *adapter); + +s32 sxe_irq_coalesce_set(struct net_device *netdev, struct ethtool_coalesce *user); + +s32 sxe_irq_coalesce_get(struct net_device * netdev, + struct ethtool_coalesce *user); + +bool sxe_is_irq_msi_mode(void); + +bool sxe_is_irq_intx_mode(void); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_main.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_main.c new file mode 100644 index 000000000000..1b35cd4ab6a0 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_main.c @@ -0,0 +1,1765 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef SXE_TPH_CONFIGURE +#include +#endif + +#include "sxe_version.h" +#include "sxe_pci.h" +#include "sxe_ring.h" +#include "sxe.h" +#include "sxe_log.h" +#include "sxe_netdev.h" +#include "sxe_hw.h" +#include "sxe_irq.h" +#include "sxe_monitor.h" +#include "sxe_filter.h" +#include "sxe_debug.h" +#include "sxe_dcb.h" +#include "sxe_sriov.h" +#include "sxe_debugfs.h" +#include "sxe_phy.h" +#include "sxe_host_cli.h" +#include "sxe_host_hdc.h" +#include "sxe_ethtool.h" + +#ifdef SXE_DRIVER_TRACE +#include "sxe_trace.h" +#endif + +#define SXE_MSG_LEVEL_DEFAULT (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) + +#define SXE_DW1_OFFSET (4) +#define SXE_DW2_OFFSET (8) +#define SXE_DW3_OFFSET (12) +#define SXE_REQ_ID_SHIFT (16) +#define SXE_REQ_ID_VF_MASK (0x0080) +#define SXE_REQ_ID_PF_MASK (0x0001) +#define SXE_REQ_ID_VF_ID_MASK (0x007F) + +static struct workqueue_struct *sxe_workqueue; + +struct workqueue_struct *sxe_fnav_workqueue; +#define SXE_FNAV_NAME "sxe_fnav" + +struct kmem_cache *fnav_cache; + +static bool allow_inval_mac; +module_param(allow_inval_mac, bool, false); +MODULE_PARM_DESC(allow_inval_mac, + "Indicates device can be probed successfully or not when mac addr invalid."); + +bool sxe_allow_inval_mac(void) +{ + return !!allow_inval_mac; +} + +#ifndef SXE_DRIVER_RELEASE + +static int irq_mode; +module_param(irq_mode, int, 0); +MODULE_PARM_DESC(irq_mode, "select irq mode(0:MSIx(default), 1:MSI, 2:INTx)"); + +bool sxe_is_irq_msi_mode(void) +{ + return (irq_mode == SXE_IRQ_MSI_MODE) ? true : false; +} + +bool sxe_is_irq_intx_mode(void) +{ + return (irq_mode == SXE_IRQ_INTX_MODE) ? 
true : false; +} + +#else + +bool sxe_is_irq_msi_mode(void) +{ + return false; +} + +bool sxe_is_irq_intx_mode(void) +{ + return false; +} + +#endif + +#ifdef SXE_TPH_CONFIGURE +static void sxe_tx_tph_update(struct sxe_adapter *adapter, + struct sxe_ring *tx_ring, + int cpu) +{ + u8 tag = 0; + struct sxe_hw *hw = &adapter->hw; + + if (adapter->cap & SXE_TPH_ENABLE) { + tag = dca3_get_tag(tx_ring->dev, cpu); + } + + hw->dma.ops->tx_tph_update(hw, tx_ring->reg_idx, tag); + + return; +} + +static void sxe_rx_tph_update(struct sxe_adapter *adapter, + struct sxe_ring *rx_ring, + int cpu) +{ + u8 tag = 0; + struct sxe_hw *hw = &adapter->hw; + + if (adapter->cap & SXE_TPH_ENABLE) { + tag = dca3_get_tag(rx_ring->dev, cpu); + } + + hw->dma.ops->rx_tph_update(hw, rx_ring->reg_idx, tag); + + return; +} + +void sxe_tph_update(struct sxe_irq_data *irq_data) +{ + struct sxe_adapter *adapter = irq_data->adapter; + struct sxe_ring *ring; + int cpu = get_cpu(); + + if (irq_data->cpu == cpu) { + goto out_no_update; + } + + sxe_for_each_ring(ring, irq_data->tx.list) { + sxe_tx_tph_update(adapter, ring, cpu); + } + + if (irq_data->tx.xdp_ring) { + sxe_tx_tph_update(adapter, irq_data->tx.xdp_ring, cpu); + } + + sxe_for_each_ring(ring, irq_data->rx.list) { + sxe_rx_tph_update(adapter, ring, cpu); + } + + irq_data->cpu = cpu; + +out_no_update: + put_cpu(); + return; +} + +void sxe_tph_setup(struct sxe_adapter *adapter) +{ + int i; + struct sxe_hw *hw = &adapter->hw; + + if (adapter->cap & SXE_TPH_ENABLE) { + hw->dma.ops->tph_switch(hw, true); + } else { + hw->dma.ops->tph_switch(hw, false); + } + + for (i = 0; i < adapter->irq_ctxt.ring_irq_num; i++) { + adapter->irq_ctxt.irq_data[i]->cpu = -1; + sxe_tph_update(adapter->irq_ctxt.irq_data[i]); + } + + return; +} + +static void sxe_tph_init(struct sxe_adapter *adapter) +{ + struct device *dev = &adapter->pdev->dev; + + if (dca_add_requester(dev) == 0) { + adapter->cap |= SXE_TPH_ENABLE; + sxe_tph_setup(adapter); + } + + return; +} + +static void sxe_tph_uninit(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + struct device *dev = &adapter->pdev->dev; + + if (adapter->cap & SXE_TPH_ENABLE) { + adapter->cap &= ~SXE_TPH_ENABLE; + dca_remove_requester(dev); + hw->dma.ops->tph_switch(hw, false); + } + + return; +} + +static int __sxe_tph_notify(struct device *dev, void *data) +{ + struct sxe_adapter *adapter = dev_get_drvdata(dev); + struct sxe_hw *hw = &adapter->hw; + unsigned long event = *(unsigned long *)data; + + if (!(adapter->cap & SXE_TPH_CAPABLE)) { + goto l_end; + } + + switch (event) { + case DCA_PROVIDER_ADD: + if (adapter->cap & SXE_TPH_ENABLE) { + break; + } + + if (dca_add_requester(dev) == 0) { + adapter->cap |= SXE_TPH_ENABLE; + hw->dma.ops->tph_switch(hw, true); + break; + } + fallthrough; + case DCA_PROVIDER_REMOVE: + if (adapter->cap & SXE_TPH_ENABLE) { + dca_remove_requester(dev); + adapter->cap &= ~SXE_TPH_ENABLE; + hw->dma.ops->tph_switch(hw, false); + } + break; + } + +l_end: + return 0; +} +#endif + +STATIC int sxe_config_dma_mask(struct sxe_adapter * adapter) +{ + int ret = 0; + + if (dma_set_mask_and_coherent(&adapter->pdev->dev, + DMA_BIT_MASK(SXE_DMA_BIT_WIDTH_64))) { + LOG_ERROR_BDF("device[pci_id %u] 64 dma mask and coherent set failed\n", + adapter->pdev->dev.id); + ret = dma_set_mask_and_coherent(&adapter->pdev->dev, + DMA_BIT_MASK(SXE_DMA_BIT_WIDTH_32)); + if (ret) { + LOG_DEV_ERR("device[pci_id %u] 32 dma mask and coherent set failed\n", + adapter->pdev->dev.id); + } + } + + return ret; +} + +STATIC int 
sxe_pci_init(struct sxe_adapter * adapter) +{ + int ret; + size_t len; + resource_size_t bar_base_paddr; + struct pci_dev *pdev = adapter->pdev; + + ret = pci_enable_device_mem(pdev); + if (ret) { + LOG_ERROR_BDF("device[pci_id %u] pci enable failed\n", pdev->dev.id); + goto l_pci_enable_device_mem_failed; + } + + ret = pci_request_mem_regions(pdev, SXE_DRV_NAME); + if (ret) { + LOG_DEV_ERR("device[pci_id %u] request IO memory failed\n", pdev->dev.id); + goto l_pci_request_mem_failed; + } + +#ifndef DELETE_PCIE_ERROR_REPORTING + pci_enable_pcie_error_reporting(pdev); +#endif + pci_set_master(pdev); + pci_save_state(pdev); + + bar_base_paddr = pci_resource_start(pdev, 0); + len = pci_resource_len(pdev, 0); + adapter->hw.reg_base_addr = ioremap(bar_base_paddr, len); + if (!adapter->hw.reg_base_addr) { + ret = -EIO; + LOG_ERROR_BDF("device[pci_id %u] ioremap[bar_base_paddr = 0x%llx, len = %zu] failed\n", + pdev->dev.id, (u64)bar_base_paddr, len); + goto l_ioremap_failed; + } else { + pci_set_drvdata(pdev, adapter); + } + + LOG_INFO_BDF("bar_base_paddr = 0x%llx, len = %zu, reg_set_vaddr = 0x%p\n", + (u64)bar_base_paddr, len, adapter->hw.reg_base_addr); + return 0; + +l_ioremap_failed: +#ifndef DELETE_PCIE_ERROR_REPORTING + pci_disable_pcie_error_reporting(pdev); +#endif + pci_release_mem_regions(pdev); +l_pci_request_mem_failed: + pci_disable_device(pdev); +l_pci_enable_device_mem_failed: + return ret; +} + +STATIC void sxe_pci_exit(struct sxe_adapter * adapter) +{ + bool disable_dev; + struct pci_dev *pdev = adapter->pdev; + + if (adapter->hw.reg_base_addr) { + iounmap(adapter->hw.reg_base_addr); + adapter->hw.reg_base_addr = NULL; + } + + disable_dev = !test_and_set_bit(SXE_DISABLED, &adapter->state); + + if (pci_is_enabled(pdev)) { +#ifndef DELETE_PCIE_ERROR_REPORTING + pci_disable_pcie_error_reporting(pdev); +#endif + pci_release_mem_regions(pdev); + if (disable_dev) { + pci_disable_device(pdev); + } + pci_set_drvdata(pdev, NULL); + } + + return; +} + +STATIC s32 sxe_get_mac_addr_from_fw(struct sxe_adapter *adapter) +{ + s32 ret; + struct sxe_driver_cmd cmd; + struct sxe_default_mac_addr_resp mac; + struct sxe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + cmd.req = NULL; + cmd.req_len = 0; + cmd.resp = &mac; + cmd.resp_len = sizeof(mac); + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_R0_MAC_GET; + cmd.is_interruptible = true; + ret = sxe_driver_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:mac addr get\n", ret); + ret = -EIO; + } else { +#ifndef HAVE_ETH_HW_ADDR_SET_API + memcpy(netdev->dev_addr, mac.addr, SXE_MAC_ADDR_LEN); +#else + eth_hw_addr_set(netdev, mac.addr); +#endif + } + + return ret; +} + +STATIC s32 sxe_default_mac_addr_get(struct sxe_adapter *adapter) +{ + s32 ret; + struct net_device *netdev = adapter->netdev; + + ret = sxe_get_mac_addr_from_fw(adapter); + if (ret || (!sxe_allow_inval_mac() && !is_valid_ether_addr(netdev->dev_addr))) { + LOG_DEV_WARN("invalid default mac addr:%pM result:%d\n", + netdev->dev_addr, ret); + ret = -EIO; + goto l_out; + } + + LOG_DEV_INFO("default mac addr = %pM\n", netdev->dev_addr); + ether_addr_copy(adapter->mac_filter_ctxt.def_mac_addr, netdev->dev_addr); + ether_addr_copy(adapter->mac_filter_ctxt.cur_mac_addr, netdev->dev_addr); + +l_out: + return ret; +} + +STATIC s32 sxe_mac_addr_init(struct sxe_hw *hw, struct net_device *netdev) +{ + s32 ret; + struct sxe_adapter *adapter = netdev_priv(netdev); + + ret = sxe_mac_filter_init(adapter); + if (ret) { + LOG_ERROR_BDF("rar entry num:%u 
mta entry num:%u " + "mac filter init fail.(ret:%d)\n", + SXE_UC_ENTRY_NUM_MAX, + SXE_MTA_ENTRY_NUM_MAX, + ret); + goto l_ret; + } + + sxe_mac_addr_set(adapter); + + LOG_INFO_BDF("hw adapter:%p reg_base_addr:%p " + "current mac addr:%pM " + "perm mac addr:%pM\n", + adapter, hw->reg_base_addr, + adapter->mac_filter_ctxt.cur_mac_addr, + adapter->mac_filter_ctxt.def_mac_addr); + +l_ret: + return ret; +} + +STATIC struct sxe_adapter *sxe_adapter_create(struct pci_dev *pdev) +{ + struct net_device *netdev; + struct sxe_adapter * adapter = NULL; + + netdev = alloc_etherdev_mq(sizeof(struct sxe_adapter), + SXE_TXRX_RING_NUM_MAX); + if (!netdev) { + LOG_ERROR("device[pci_id %u] sxe net device alloc failed\n", pdev->dev.id); + goto l_netdev_alloc_failed; + } + + adapter = netdev_priv(netdev); + adapter->pdev = pdev; + adapter->netdev = netdev; + + adapter->msg_enable = netif_msg_init(-1, + SXE_MSG_LEVEL_DEFAULT); + + LOG_INFO_BDF("adapter:%pK netdev:%pK pdev:%pK\n", adapter, netdev, pdev); + +l_netdev_alloc_failed: + return adapter; +} + +void sxe_hw_start(struct sxe_hw *hw) +{ + hw->mac.auto_restart = true; + + hw->filter.vlan.ops->filter_array_clear(hw); + + hw->stat.ops->stats_clear(hw); + + hw->setup.ops->no_snoop_disable(hw); + + hw->dma.ops->dcb_rate_limiter_clear(hw, SXE_TXRX_RING_NUM_MAX); + + hw->mac.ops->fc_autoneg_localcap_set(hw); + + LOG_INFO("init auto_restart:%u\n", hw->mac.auto_restart); +} + +STATIC s32 sxe_mng_reset(struct sxe_adapter *adapter, bool enable) +{ + s32 ret; + struct sxe_driver_cmd cmd; + sxe_mng_rst_s mng_rst; + struct sxe_hw *hw = &adapter->hw; + + mng_rst.enable = enable; + LOG_INFO_BDF("mng reset, enable=%x\n", enable); + + cmd.req = &mng_rst; + cmd.req_len = sizeof(mng_rst); + cmd.resp = NULL; + cmd.resp_len = 0; + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_MNG_RST; + cmd.is_interruptible = true; + ret = sxe_driver_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("mng reset failed, ret=%d\n", ret); + goto l_end; + } + + LOG_INFO_BDF("mng reset success, enable=%x\n", enable); + +l_end: + return ret; +} + +s32 sxe_hw_reset(struct sxe_adapter *adapter) +{ + s32 ret; + struct sxe_hw *hw = &adapter->hw; + + hw->dbu.ops->rx_cap_switch_off(hw); + + hw->irq.ops->all_irq_disable(hw); + + hw->irq.ops->pending_irq_read_clear(hw); + + hw->dma.ops->all_ring_disable(hw, SXE_TXRX_RING_NUM_MAX); + + ret = sxe_mng_reset(adapter, false); + if (ret) { + LOG_ERROR_BDF("mng reset disable failed, ret=%d\n", ret); + ret = -EPERM; + goto l_end; + } + + + ret = hw->setup.ops->reset(hw); + if (ret) { + LOG_ERROR_BDF("nic reset failed, ret=%d\n", ret); + ret = -EPERM; + goto l_end; + } + + msleep(50); + + ret = sxe_mng_reset(adapter, true); + if (ret) { + LOG_ERROR_BDF("mng reset enable failed, ret=%d\n", ret); + ret = -EPERM; + goto l_end; + } + + hw->filter.mac.ops->uc_addr_clear(hw); + + hw->filter.mac.ops->vt_disable(hw); + +l_end: + return ret; +} + +static s32 sxe_led_reset(struct sxe_adapter *adapter) +{ + s32 ret; + s32 resp; + struct sxe_led_ctrl ctrl; + struct sxe_driver_cmd cmd; + struct sxe_hw *hw = &adapter->hw; + + ctrl.mode = SXE_IDENTIFY_LED_RESET; + ctrl.duration = 0; + + cmd.req = &ctrl; + cmd.req_len = sizeof(ctrl); + cmd.resp = &resp; + cmd.resp_len = sizeof(resp); + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_LED_CTRL; + cmd.is_interruptible = false; + ret = sxe_driver_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:led reset\n", ret); + ret = -EIO; + } + + LOG_INFO_BDF("led reset\n"); + + return ret; +} + +static void 
sxe_link_fc_init(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + hw->mac.ops->fc_param_init(hw); + return; +} + +static inline u32 sxe_readl(const volatile void *reg) +{ + return readl(reg); +} + +static inline void sxe_writel(u32 value, volatile void *reg) +{ + writel(value, reg); + return; +} + +STATIC int sxe_hw_base_init(struct sxe_adapter *adapter) +{ + int ret; + struct sxe_hw *hw = &adapter->hw; + + hw->adapter = adapter; + adapter->vlan_ctxt.vlan_table_size = SXE_VFT_TBL_SIZE; + + sxe_hw_ops_init(hw); + sxe_hw_reg_handle_init(hw, sxe_readl, sxe_writel); + + sxe_hdc_channel_init(&adapter->hdc_ctxt); + + hw->hdc.ops->drv_status_set(hw, (U32)true); + + ret = sxe_phy_init(adapter); + if (ret == -SXE_ERR_SFF_NOT_SUPPORTED) { + LOG_DEV_ERR("sfp is not sfp+, not supported, ret=%d\n", ret); + ret = -EPERM; + goto l_ret; + } else if (ret) { + LOG_ERROR_BDF("phy init failed, ret=%d\n", ret); + } + + ret = sxe_default_mac_addr_get(adapter); + if (ret) { + LOG_ERROR_BDF("get valid default mac addr failed, ret=%d\n", ret); + goto l_ret; + } + + sxe_link_fc_init(adapter); + + ret = sxe_hw_reset(adapter); + if (ret < 0) { + LOG_ERROR_BDF("hw init failed, ret=%d\n", ret); + goto l_ret; + } else { + sxe_hw_start(hw); + } + + ret = sxe_mac_addr_init(hw, adapter->netdev); + if (ret) { + LOG_ERROR_BDF("mac addr init fail, ret=%d\n", ret); + } + + sxe_led_reset(adapter); + +l_ret: + if (ret) { + hw->hdc.ops->drv_status_set(hw, (U32)false); + } + return ret; +} + +static void sxe_vt_init(struct sxe_adapter *adapter) +{ + set_bit(0, adapter->vt_ctxt.pf_pool_bitmap); + + sxe_sriov_init(adapter); + + return; +} + +STATIC void sxe_fnav_ctxt_init(struct sxe_adapter *adapter) +{ + adapter->fnav_ctxt.sample_rate = SXE_FNAV_DEFAULT_SAMPLE_RATE; + adapter->fnav_ctxt.rules_table_size = SXE_FNAV_RULES_TABLE_SIZE_64K; + spin_lock_init(&adapter->fnav_ctxt.specific_lock); + + adapter->fnav_ctxt.fdir_overflow_time = 0; + spin_lock_init(&adapter->fnav_ctxt.sample_lock); + adapter->fnav_ctxt.sample_rules_cnt = 0; + + return ; +} + +#ifdef HAVE_AF_XDP_ZERO_COPY +static s32 sxe_xdp_mem_alloc(struct sxe_adapter *adapter) +{ + s32 ret = 0; + + adapter->af_xdp_zc_qps = bitmap_zalloc(SXE_XDP_RING_NUM_MAX, GFP_KERNEL); + if (!adapter->af_xdp_zc_qps) { + ret = -ENOMEM; + } + + return ret; +} + +static void sxe_xdp_mem_free(struct sxe_adapter *adapter) +{ + bitmap_free(adapter->af_xdp_zc_qps); + adapter->af_xdp_zc_qps = NULL; + return; +} +#endif +void sxe_fw_version_get(struct sxe_adapter *adapter) +{ + s32 ret; + sxe_version_resp_s resp; + struct sxe_driver_cmd cmd; + struct sxe_hw *hw = &adapter->hw; + + cmd.req = NULL; + cmd.req_len = 0; + cmd.resp = &resp; + cmd.resp_len = sizeof(resp); + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_FW_VER_GET; + cmd.is_interruptible = true; + ret = sxe_driver_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("get version failed, ret=%d\n", ret); + memset(adapter->fw_info.fw_version, 0, SXE_VERSION_LEN); + } else { + memcpy(adapter->fw_info.fw_version, resp.fw_version, + SXE_VERSION_LEN); + } + + return; +} + +static s32 sxe_sw_base_init1(struct sxe_adapter *adapter) +{ + s32 ret; + + set_bit(SXE_DOWN, &adapter->state); + + adapter->irq_ctxt.rx_irq_interval = 1; + adapter->irq_ctxt.tx_irq_interval = 1; + + spin_lock_init(&adapter->irq_ctxt.event_irq_lock); + + ret = sxe_config_space_irq_num_get(adapter); + if (ret) { + LOG_ERROR_BDF("get pci cfg irq num fail.(err:%d)\n", ret); + goto l_out; + } + + sxe_vt_init(adapter); + + sxe_ring_feature_init(adapter); + + 
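/*
 * Illustrative sketch, not part of this patch: the firmware mailbox helpers
 * above (sxe_get_mac_addr_from_fw(), sxe_mng_reset(), sxe_led_reset(),
 * sxe_fw_version_get()) all share one calling convention -- fill a
 * struct sxe_driver_cmd with optional request/response buffers and an
 * opcode, then hand it to sxe_driver_cmd_trans(). The helper name and the
 * opcode parameter below are hypothetical; the struct fields match the
 * call sites in this file.
 *
 * static s32 sxe_example_fw_query(struct sxe_adapter *adapter,
 *                                 void *resp, u16 resp_len, u16 opcode)
 * {
 *         struct sxe_driver_cmd cmd = {
 *                 .req              = NULL,
 *                 .req_len          = 0,
 *                 .resp             = resp,
 *                 .resp_len         = resp_len,
 *                 .trace_id         = 0,
 *                 .opcode           = opcode,
 *                 .is_interruptible = true,
 *         };
 *         s32 ret;
 *
 *         ret = sxe_driver_cmd_trans(&adapter->hw, &cmd);
 *         if (ret)
 *                 LOG_ERROR_BDF("hdc trans failed ret=%d, opcode=0x%x\n",
 *                               ret, opcode);
 *
 *         return ret;
 * }
 */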
sxe_fnav_ctxt_init(adapter); + +#ifdef HAVE_AF_XDP_ZERO_COPY + ret = sxe_xdp_mem_alloc(adapter); + if (ret) { + LOG_ERROR_BDF("xdp mem alloc failed, ret=%d\n", ret); + goto l_out; + } +#endif + sxe_fw_version_get(adapter); + + LOG_INFO_BDF("adapter rx_irq_interval:%u tx_irq_interval:%u.\n", + adapter->irq_ctxt.rx_irq_interval, + adapter->irq_ctxt.tx_irq_interval); + +l_out: + return ret; +} + +s32 sxe_ring_irq_init(struct sxe_adapter *adapter) +{ + s32 ret; + + sxe_ring_num_set(adapter); + + ret = sxe_irq_ctxt_init(adapter); + if (ret) { + LOG_ERROR_BDF("interrupt context init fail.(err:%d)\n", ret); + goto l_end; + } + + sxe_ring_reg_map(adapter); + + LOG_DEV_INFO("multiqueue %s: rx queue count = %u, tx queue count = %u xdp queue count = %u\n", + (adapter->rx_ring_ctxt.num > 1) ? "enabled" : "disabled", + adapter->rx_ring_ctxt.num, adapter->tx_ring_ctxt.num, + adapter->xdp_ring_ctxt.num); + +l_end: + return ret; +} + +void sxe_ring_irq_exit(struct sxe_adapter *adapter) +{ + sxe_irq_ctxt_exit(adapter); + + return; +} + +#ifdef SXE_WOL_CONFIGURE + +void sxe_wol_init(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + + hw->filter.mac.ops->wol_status_set(hw); + adapter->wol = 0; + sxe_fw_wol_set(adapter, 0); + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return; +} + +#endif + +static void sxe_sw_base_init2(struct sxe_adapter *adapter) +{ + sxe_ring_stats_init(adapter); + + sxe_monitor_init(adapter); + +#ifdef SXE_TPH_CONFIGURE + adapter->cap |= SXE_TPH_CAPABLE; +#endif + +#ifdef SXE_DCB_CONFIGURE + sxe_dcb_init(adapter); +#endif + +#ifdef SXE_IPSEC_CONFIGURE + sxe_ipsec_offload_init(adapter); +#endif + +#ifdef SXE_WOL_CONFIGURE + sxe_wol_init(adapter); +#endif + return; +} + +STATIC int sxe_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int ret; + struct sxe_adapter *adapter; + const char *device_name = dev_name(&pdev->dev); + + adapter = sxe_adapter_create(pdev); + if (!adapter) { + LOG_ERROR("adapter create failed.\n"); + ret = -ENOMEM; + goto l_adapter_create_failed; + } + + strlcpy(adapter->dev_name, device_name, + min_t(u32, strlen(device_name) + 1, DEV_NAME_LEN)); + + ret = sxe_pci_init(adapter); + if (ret) { + LOG_ERROR_BDF("pci init failed.(ret:%d)\n", ret); + goto l_pci_init_failed; + } + + ret = sxe_config_dma_mask(adapter); + if (ret) { + LOG_ERROR_BDF("config dma mask failed.(ret:%d)\n", ret); + goto l_config_dma_mask_failed; + } + + sxe_netdev_init(adapter->netdev, pdev); + + ret = sxe_hw_base_init(adapter); + if (ret) { + LOG_ERROR_BDF("hardware base init failed.(ret:%d)\n", ret); + goto l_config_dma_mask_failed; + } + + ret = sxe_sw_base_init1(adapter); + if (ret) { + LOG_ERROR_BDF("sw base init1 failed.(ret:%d)\n", ret); + goto l_sw_base_init1_failed; + } + + ret = sxe_ring_irq_init(adapter); + if (ret) { + LOG_ERROR_BDF("interrupt ring assign scheme init failed, err=%d\n", ret); + goto l_sw_base_init1_failed; + } + + ret = sxe_cli_cdev_create(adapter); + if (ret) { + LOG_ERROR_BDF("create cli char dev failed, ret = %d\n", ret); + goto l_create_cdev_failed; + } + + sxe_sw_base_init2(adapter); + + if (adapter->phy_ctxt.ops->sfp_tx_laser_disable) { + adapter->phy_ctxt.ops->sfp_tx_laser_disable(adapter); + } + + strcpy(adapter->netdev->name, "eth%d"); + ret = register_netdev(adapter->netdev); + if (ret) { + LOG_ERROR_BDF("register netdev failed.(ret:%d)\n", ret); + goto l_register_netdev_failed; + } + + netif_carrier_off(adapter->netdev); + +#ifdef SXE_TPH_CONFIGURE + sxe_tph_init(adapter); +#endif + + 
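/*
 * Illustrative sketch, not part of this patch: the error handling at the
 * end of sxe_probe() below uses the usual goto-ladder pattern -- each
 * failure jumps to a label that undoes only the steps already completed,
 * in reverse order of setup. A minimal sketch of that shape; step_a(),
 * step_b() and undo_a() are hypothetical helpers.
 *
 * static int example_probe(struct pci_dev *pdev)
 * {
 *         int ret;
 *
 *         ret = step_a(pdev);
 *         if (ret)
 *                 goto l_out;
 *
 *         ret = step_b(pdev);
 *         if (ret)
 *                 goto l_undo_a;
 *
 *         return 0;
 *
 * l_undo_a:
 *         undo_a(pdev);
 * l_out:
 *         return ret;
 * }
 */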
sxe_debugfs_entries_init(adapter); + +#ifdef SXE_PHY_CONFIGURE + sxe_mdiobus_init(adapter); +#endif + + LOG_INFO_BDF("%s %s %s %s %s pf deviceId:0x%x probe done.\n", + dev_driver_string(pdev->dev.parent), + dev_name(pdev->dev.parent), + netdev_name(adapter->netdev), + dev_driver_string(&pdev->dev), + dev_name(&pdev->dev), + pdev->device); + + return 0; + +l_register_netdev_failed: + sxe_cli_cdev_delete(adapter); +l_create_cdev_failed: + sxe_ring_irq_exit(adapter); +l_sw_base_init1_failed: +#ifdef HAVE_AF_XDP_ZERO_COPY + if (adapter->af_xdp_zc_qps) { + sxe_xdp_mem_free(adapter); + } +#endif + sxe_mac_filter_destroy(adapter); +l_config_dma_mask_failed: + sxe_pci_exit(adapter); +l_pci_init_failed: + free_netdev(adapter->netdev); +l_adapter_create_failed: + return ret; +} + +static void sxe_fuc_exit(struct sxe_adapter *adapter) +{ + + cancel_work_sync(&adapter->monitor_ctxt.work); + cancel_work_sync(&adapter->hdc_ctxt.time_sync_work); + +#ifdef SXE_PHY_CONFIGURE + sxe_mdiobus_exit(adapter); +#endif + +#ifdef SXE_TPH_CONFIGURE + sxe_tph_uninit(adapter); +#endif + + return; +} + +static void sxe_vt_exit(struct sxe_adapter *adapter) +{ + sxe_vf_exit(adapter); + + return; +} + +STATIC void sxe_remove(struct pci_dev *pdev) +{ + struct sxe_adapter * adapter = pci_get_drvdata(pdev); + struct sxe_hw *hw = &adapter->hw; + struct net_device *netdev; + + if (!adapter) { + goto l_end; + } + + set_bit(SXE_REMOVING, &adapter->state); + netdev = adapter->netdev; + + hw->hdc.ops->drv_status_set(hw, (U32)false); + + sxe_debugfs_entries_exit(adapter); + + sxe_fuc_exit(adapter); + + sxe_vt_exit(adapter); + + if (NETREG_REGISTERED == netdev->reg_state) { + unregister_netdev(netdev); + } + + sxe_cli_cdev_delete(adapter); + +#ifdef SXE_IPSEC_CONFIGURE + sxe_ipsec_offload_exit(adapter); +#endif + + sxe_ring_irq_exit(adapter); + +#ifdef SXE_DCB_CONFIGURE + sxe_dcb_exit(adapter); +#endif + + sxe_pci_exit(adapter); + + LOG_DEV_INFO("complete\n"); + + sxe_mac_filter_destroy(adapter); +#ifdef HAVE_AF_XDP_ZERO_COPY + sxe_xdp_mem_free(adapter); +#endif + free_netdev(adapter->netdev); + + LOG_INFO("%s %s %s %s deviceId:0x%x remove done.\n", + dev_driver_string(pdev->dev.parent), + dev_name(pdev->dev.parent), + dev_driver_string(&pdev->dev), + dev_name(&pdev->dev), + pdev->device); + +l_end: + return; +} + +static int __sxe_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct sxe_adapter *adapter = pci_get_drvdata(pdev); + struct sxe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + +#ifdef SXE_WOL_CONFIGURE + u32 wol = adapter->wol; +#else + u32 wol = 0; +#endif + int ret = 0; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) { + sxe_terminate(adapter); + } + + hw->hdc.ops->drv_status_set(hw, (U32)false); + + sxe_ring_irq_exit(adapter); + rtnl_unlock(); + +#ifdef CONFIG_PM + ret = pci_save_state(pdev); + if (ret) { + LOG_DEBUG_BDF("pci save state err:%d\n", ret); + return ret; + } +#endif + +#ifdef SXE_WOL_CONFIGURE + if (wol) { + __sxe_set_rx_mode(netdev, true); + hw->filter.mac.ops->wol_mode_set(hw, wol); + } else { + hw->filter.mac.ops->wol_mode_clean(hw); + } +#endif + + pci_wake_from_d3(pdev, !!wol); + + *enable_wake = !!wol; + +#ifdef SXE_WOL_CONFIGURE + if (wol) { + hw->dbu.ops->rx_func_switch_on(hw); + } +#endif + + if (!test_and_set_bit(SXE_DISABLED, &adapter->state)) { + pci_disable_device(pdev); + } + + return ret; +} + +STATIC void sxe_shutdown(struct pci_dev *pdev) +{ + bool wol_enable = false; + + __sxe_shutdown(pdev, &wol_enable); + + if 
(system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wol_enable); + pci_set_power_state(pdev, PCI_D3hot); + } + + return; +} + +#ifdef CONFIG_PM +static int sxe_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + bool wol_enable; + struct sxe_adapter *adapter = pci_get_drvdata(pdev); + struct sxe_hw *hw = &adapter->hw; + + ret = __sxe_shutdown(pdev, &wol_enable); + + cancel_work_sync(&adapter->monitor_ctxt.work); + cancel_work_sync(&adapter->hdc_ctxt.time_sync_work); + + sxe_hdc_channel_destroy(hw); + if (ret) { + LOG_ERROR_BDF("driver shutdown err:%d\n", ret); + goto l_ret; + } + + LOG_DEBUG_BDF("pci dev[%p], wol_enable:%s\n", + pdev, wol_enable ? "yes" : "no"); + + if (wol_enable) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + +l_ret: + return ret; +} + +static int sxe_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sxe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + s32 ret; +#ifdef SXE_WOL_CONFIGURE + struct sxe_hw *hw = &adapter->hw; +#endif + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + pci_save_state(pdev); + + ret = pci_enable_device_mem(pdev); + if (ret) { + LOG_DEV_ERR("cannot enable pci device from suspend\n"); + goto l_ret; + } + + smp_mb__before_atomic(); + clear_bit(SXE_DISABLED, &adapter->state); + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + sxe_hdc_available_set(1); + + sxe_reset(adapter); + +#ifdef SXE_WOL_CONFIGURE + hw->filter.mac.ops->wol_status_set(hw); +#endif + + INIT_WORK(&adapter->hdc_ctxt.time_sync_work, sxe_time_sync_handler); + INIT_WORK(&adapter->monitor_ctxt.work, sxe_work_cb); + + rtnl_lock(); + ret = sxe_ring_irq_init(adapter); + LOG_DEBUG_BDF("ring irq init finish, ret=%d\n", ret); + if (!ret && netif_running(netdev)) { + ret = sxe_open(netdev); + LOG_DEBUG_BDF("sxe open adapter finish, ret=%d\n", ret); + } + + if (!ret) { + netif_device_attach(netdev); + } + + rtnl_unlock(); + +l_ret: + return ret; +} +#endif + +#ifdef CONFIG_PCI_IOV +#ifdef HAVE_NO_PCIE_FLR +static inline void sxe_issue_vf_flr(struct sxe_adapter *adapter, + struct pci_dev *vf_dev) +{ + int pos, i; + u16 status; + + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(vf_dev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + goto clear; + } + + LOG_DEV_WARN("Issuing VFLR with pending transactions\n"); + +clear: + pos = pci_find_capability(vf_dev, PCI_CAP_ID_EXP); + if (!pos) + return; + + LOG_DEV_ERR("Issuing VFLR for VF %s\n", pci_name(vf_dev)); + pci_write_config_word(vf_dev, pos + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); +} +#endif +#endif + +static pci_ers_result_t sxe_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; + struct sxe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + +#ifdef CONFIG_PCI_IOV + struct sxe_hw *hw = &adapter->hw; + struct pci_dev *bdev, *vfdev; + u32 dw0, dw1, dw2, dw3; + int vf, pos; + u16 req_id, pf_func; + + LOG_DEBUG_BDF("sriov open, vf error process\n"); + if (adapter->vt_ctxt.num_vfs == 0) { + LOG_DEBUG_BDF("num vfs == 0\n"); + goto skip_bad_vf_detection; + } + + bdev = pdev->bus->self; + while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) { + bdev = bdev->bus->self; + } + + if (!bdev) { + LOG_DEBUG_BDF("no pci dev\n"); + 
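/*
 * Illustrative sketch, not part of this patch: sxe_issue_vf_flr() above
 * uses a common PCIe idiom -- poll the device status register for pending
 * transactions with exponential backoff before forcing a function-level
 * reset. A minimal standalone version of that wait loop; the four-attempt
 * limit mirrors the code above and the helper name is hypothetical.
 *
 * static bool example_wait_pending_transactions(struct pci_dev *dev)
 * {
 *         u16 status;
 *         int i;
 *
 *         for (i = 0; i < 4; i++) {
 *                 if (i)
 *                         msleep((1 << (i - 1)) * 100);
 *
 *                 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
 *                 if (!(status & PCI_EXP_DEVSTA_TRPND))
 *                         return true;
 *         }
 *
 *         return false;
 * }
 */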
goto skip_bad_vf_detection; + } + + pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); + if (!pos) { + LOG_DEBUG_BDF("pci dev not support aer\n"); + goto skip_bad_vf_detection; + } + + dw0 = sxe_read_pci_cfg_dword(adapter, pos + PCI_ERR_HEADER_LOG); + dw1 = sxe_read_pci_cfg_dword(adapter, pos + PCI_ERR_HEADER_LOG + SXE_DW1_OFFSET); + dw2 = sxe_read_pci_cfg_dword(adapter, pos + PCI_ERR_HEADER_LOG + SXE_DW2_OFFSET); + dw3 = sxe_read_pci_cfg_dword(adapter, pos + PCI_ERR_HEADER_LOG + SXE_DW3_OFFSET); + if (sxe_is_hw_fault(hw)) { + LOG_ERROR_BDF("hw is fault\n"); + goto skip_bad_vf_detection; + } + + req_id = dw1 >> SXE_REQ_ID_SHIFT; + if (!(req_id & SXE_REQ_ID_VF_MASK)) { + LOG_DEBUG_BDF("this is not a vf\n"); + goto skip_bad_vf_detection; + } + + pf_func = req_id & SXE_REQ_ID_PF_MASK; + if ((pf_func & 1) == (pdev->devfn & 1)) { + u32 device_id = SXE_DEV_ID_ASIC; + + vf = (req_id & SXE_REQ_ID_VF_ID_MASK) >> 1; + LOG_DEV_ERR("vf %d has caused a pcie error\n", vf); + LOG_DEV_ERR("tlp: dw0: %8.8x\tdw1: %8.8x\tdw2: " + "%8.8x\tdw3: %8.8x\n", + dw0, dw1, dw2, dw3); + + vfdev = pci_get_device(PCI_VENDOR_ID_STARS, device_id, NULL); + while (vfdev) { + if (vfdev->devfn == (req_id & 0xFF)) { + break; + } + vfdev = pci_get_device(PCI_VENDOR_ID_STARS, + device_id, vfdev); + } + + if (vfdev) { + LOG_DEBUG_BDF("vfdev[%p] miss, do flr\n",vfdev); +#ifdef HAVE_NO_PCIE_FLR + sxe_issue_vf_flr(adapter, vfdev); +#else + pcie_flr(vfdev); +#endif + pci_dev_put(vfdev); + } + } + + adapter->vt_ctxt.err_refcount++; + LOG_ERROR_BDF("vf err count=%u\n", adapter->vt_ctxt.err_refcount); + ret = PCI_ERS_RESULT_RECOVERED; + goto l_ret; + +skip_bad_vf_detection: +#endif + LOG_DEBUG_BDF("oops, pci dev[%p] got io error detect, state=0x%x\n", + pdev, (u32)state); + + if (!test_bit(SXE_MONITOR_WORK_INITED, &adapter->monitor_ctxt.state)) { + LOG_ERROR_BDF("driver adapter service not init\n"); + goto l_ret; + } + + if (!netif_device_present(netdev)) { + LOG_ERROR_BDF("pci netdev not present\n"); + goto l_ret; + } + + rtnl_lock(); + netif_device_detach(netdev); + + LOG_DEBUG_BDF("netdev[%p], detached and continue\n",netdev); + + if (netif_running(netdev)) { + sxe_terminate(adapter); + } + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + LOG_ERROR_BDF("pci channel io perm failure\n"); + goto l_ret; + } + + if (!test_and_set_bit(SXE_DISABLED, &adapter->state)) { + pci_disable_device(pdev); + } + + rtnl_unlock(); + ret = PCI_ERS_RESULT_NEED_RESET; + +l_ret: + LOG_DEBUG_BDF("netdev[%p] error detected end and ret=0x%x\n",netdev, ret); + return ret; +} + +static pci_ers_result_t sxe_io_slot_reset(struct pci_dev *pdev) +{ + struct sxe_adapter *adapter = pci_get_drvdata(pdev); + pci_ers_result_t result; +#ifdef SXE_WOL_CONFIGURE + struct sxe_hw *hw = &adapter->hw; +#endif + + LOG_DEBUG_BDF("oops, pci dev[%p] got io slot reset\n",pdev); + + if (pci_enable_device_mem(pdev)) { + LOG_MSG_ERR(probe, "pci dev[%p] cannot re-enable " + "pci device after reset.\n",pdev); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + LOG_DEBUG_BDF("pci dev[%p] start re-enable\n", pdev); + smp_mb__before_atomic(); + clear_bit(SXE_DISABLED, &adapter->state); + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + pci_wake_from_d3(pdev, false); + + sxe_reset(adapter); + +#ifdef SXE_WOL_CONFIGURE + hw->filter.mac.ops->wol_status_set(hw); +#endif + + result = PCI_ERS_RESULT_RECOVERED; + } + + LOG_DEBUG_BDF("pci dev[%p] io slot reset end result=0x%x\n", + pdev, (u32)result); + + return result; +} + +static void 
sxe_io_resume(struct pci_dev *pdev) +{ + struct sxe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + LOG_DEBUG_BDF("oops, pci dev[%p] got io resume\n",pdev); + +#ifdef CONFIG_PCI_IOV + if (adapter->vt_ctxt.err_refcount) { + LOG_DEV_INFO("resuming after vf err\n"); + adapter->vt_ctxt.err_refcount--; + goto l_ret; + } + +#endif + rtnl_lock(); + if (netif_running(netdev)) { + LOG_DEBUG_BDF("netdev running, open adapter"); + sxe_open(netdev); + } + + netif_device_attach(netdev); + rtnl_unlock(); + LOG_DEBUG_BDF("pci dev[%p] io resume end\n",pdev); + +#ifdef CONFIG_PCI_IOV +l_ret: +#endif + return; +} + +static const struct pci_error_handlers sxe_err_handler = { + .error_detected = sxe_io_error_detected, + .slot_reset = sxe_io_slot_reset, + .resume = sxe_io_resume, +}; + +static const struct pci_device_id sxe_pci_tbl[] = { + {PCI_VENDOR_ID_STARS, SXE_DEV_ID_ASIC, PCI_ANY_ID, PCI_ANY_ID, 0, 0 ,0}, + + {0, } +}; + +static SIMPLE_DEV_PM_OPS(sxe_pm_ops, sxe_suspend, sxe_resume); +static struct pci_driver sxe_pci_driver = { + .name = SXE_DRV_NAME, + .id_table = sxe_pci_tbl, + .probe = sxe_probe, + .remove = sxe_remove, + .driver.pm = &sxe_pm_ops, + .shutdown = sxe_shutdown, + .sriov_configure = sxe_sriov_configure, + .err_handler = &sxe_err_handler +}; + +#ifdef SXE_TPH_CONFIGURE +static int sxe_tph_notify(struct notifier_block *nb, unsigned long event, + void *p) +{ + int ret_val; + + ret_val = driver_for_each_device(&sxe_pci_driver.driver, NULL, &event, + __sxe_tph_notify); + + return ret_val ? NOTIFY_BAD : NOTIFY_DONE; +} + +static struct notifier_block tph_notifier = { + .notifier_call = sxe_tph_notify, + .next = NULL, + .priority = 0 +}; +#endif + +#ifndef SXE_DRIVER_RELEASE +static ssize_t sxe_log_level_store(struct device_driver *dd, + const char *buf, size_t count) +{ + int level = 0; + ssize_t ret = count; + + if (dd == NULL || buf == NULL) { + goto l_out; + } + + if (kstrtoint(buf, 10, &level)) { + LOG_ERROR("invalid log level, could not set log level\n"); + ret = -EINVAL; + goto l_out; + } + LOG_WARN("set log level to %d\n", level); + + sxe_level_set(level); +l_out: + return ret; +} + +static ssize_t sxe_log_level_show(struct device_driver *dd, char *buf) +{ + ssize_t ret = 0; + s32 level = sxe_level_get(); + + if (dd == NULL || buf == NULL) { + goto l_out; + } + ret = snprintf(buf, PAGE_SIZE, "%d\n", level); + + LOG_DEBUG("get log level to %d\n", level); +l_out: + return ret; +} + +static ssize_t log_level_store(struct device_driver *dd, + const char *buf, size_t count) +{ + return sxe_log_level_store(dd, buf, count); +} + +static inline ssize_t log_level_show(struct device_driver *dd, char *buf) +{ + return sxe_log_level_show(dd, buf); +} + +static ssize_t sxe_dump_status_store(struct device_driver *dd, + const char *buf, size_t count) +{ + int status = 0; + ssize_t ret = count; + + if (dd == NULL || buf == NULL) { + goto l_out; + } + + if (kstrtoint(buf, 10, &status)) { + LOG_ERROR("invalid status, could not set dump status\n"); + ret = -EINVAL; + goto l_out; + } + LOG_WARN("set dump status to %d\n", status); + + sxe_bin_status_set(!!status); +l_out: + return ret; +} + +static ssize_t sxe_dump_status_show(struct device_driver *dd, char *buf) +{ + ssize_t ret = 0; + s32 status = sxe_bin_status_get(); + + if (dd == NULL || buf == NULL) { + goto l_out; + } + ret = snprintf(buf, PAGE_SIZE, "%d\n", status); + + LOG_DEBUG("get log level to %d\n", status); +l_out: + return ret; +} + +static ssize_t dump_status_store(struct device_driver *dd, + const 
char *buf, size_t count) +{ + return sxe_dump_status_store(dd, buf, count); +} + +static inline ssize_t dump_status_show(struct device_driver *dd, char *buf) +{ + return sxe_dump_status_show(dd, buf); +} + +static DRIVER_ATTR_RW(log_level); +static DRIVER_ATTR_RW(dump_status); + +static s32 sxe_driver_create_file(void) +{ + s32 ret; + + ret = driver_create_file(&sxe_pci_driver.driver, &driver_attr_log_level); + if (ret) { + LOG_ERROR("driver create file attr log level failed\n"); + goto l_ret; + } + + ret = driver_create_file(&sxe_pci_driver.driver, &driver_attr_dump_status); + if (ret) { + LOG_ERROR("driver create file attr dump status failed\n"); + goto l_remove_log_level; + } + + goto l_ret; + +l_remove_log_level: + driver_remove_file(&sxe_pci_driver.driver, &driver_attr_log_level); +l_ret: + return ret; +} + +static void sxe_driver_remove_file(void) +{ + driver_remove_file(&sxe_pci_driver.driver, &driver_attr_log_level); + driver_remove_file(&sxe_pci_driver.driver, &driver_attr_dump_status); + return; +} +#endif + +#ifdef SXE_DRIVER_TRACE +ssize_t trace_dump_store(struct device_driver *dd, const char *buf, size_t count) +{ + ssize_t ret = count; + + if (dd == NULL || buf == NULL) { + goto l_out; + } + + sxe_trace_dump(); + +l_out: + return ret; +} + +static inline ssize_t trace_dump_show(struct device_driver *dd, char *buf) +{ + return 0; +} + +static DRIVER_ATTR_RW(trace_dump); + +s32 sxe_trace_dump_create_file(void) +{ + s32 ret; + + ret = driver_create_file(&sxe_pci_driver.driver, &driver_attr_trace_dump); + if (ret) { + LOG_ERROR("driver create file attr log level failed\n"); + } + + return ret; +} + +void sxe_trace_dump_remove_file(void) +{ + driver_remove_file(&sxe_pci_driver.driver, &driver_attr_trace_dump); + return; +} +#endif + +static int __init sxe_init(void) +{ + int ret; + + LOG_PR_INFO("version[%s], commit_id[%s]," + "branch[%s], build_time[%s]\n", + SXE_VERSION, SXE_COMMIT_ID, + SXE_BRANCH, SXE_BUILD_TIME); + +#ifndef SXE_DRIVER_RELEASE + ret = sxe_log_init(false); + if (ret < 0) { + LOG_PR_ERR("sxe log init fail.(err:%d)\n", ret); + goto l_end; + } +#endif + + sxe_workqueue = create_singlethread_workqueue(SXE_DRV_NAME); + if (!sxe_workqueue) { + LOG_PR_ERR("failed to create workqueue\n"); + ret = -ENOMEM; + goto l_create_workque_failed; + } + + sxe_fnav_workqueue = create_singlethread_workqueue(SXE_FNAV_NAME); + if (!sxe_fnav_workqueue) { + LOG_PR_ERR("failed to create fnav workqueue\n"); + ret = -ENOMEM; + goto l_create_fnav_workque_failed; + } + + fnav_cache = kmem_cache_create("fnav_sample_cache", + sizeof(struct sxe_fnav_sample_work_info), 0, SLAB_HWCACHE_ALIGN, NULL); + if(!fnav_cache) { + LOG_PR_ERR("failed to create fnav kmem cache\n"); + ret = -ENOMEM; + goto l_create_fnav_kmem_failed; + } + + sxe_debugfs_init(); + + ret = sxe_cli_cdev_register(); + if (ret) { + LOG_ERROR("register cli char dev failed\n"); + goto l_cli_cdev_register_failed; + } + + ret = pci_register_driver(&sxe_pci_driver); + if (ret) { + goto l_pci_register_driver_failed; + } + +#ifndef SXE_DRIVER_RELEASE + ret = sxe_driver_create_file(); + if (ret) { + goto l_register_driver_failed; + } +#endif + +#ifdef SXE_DRIVER_TRACE + ret = sxe_trace_dump_create_file(); + if (ret) { + LOG_ERROR("sxe_trace_dump_create_file failed: %d\n", ret); + } +#endif + +#ifdef SXE_TPH_CONFIGURE + dca_register_notify(&tph_notifier); +#endif + LOG_INFO("sxe module init success\n"); + return 0; + +#ifndef SXE_DRIVER_RELEASE +l_register_driver_failed: + pci_unregister_driver(&sxe_pci_driver); +#endif + 
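/*
 * Illustrative sketch, not part of this patch: the unwind labels here
 * mirror the setup order in sxe_init() -- each label releases exactly the
 * resources created before the failing step, in reverse order. A minimal
 * sketch of the same idea for a module that creates a workqueue and a
 * slab cache; all names are illustrative.
 *
 * static struct workqueue_struct *example_wq;
 * static struct kmem_cache *example_cache;
 *
 * static int __init example_init(void)
 * {
 *         int ret = -ENOMEM;
 *
 *         example_wq = create_singlethread_workqueue("example_wq");
 *         if (!example_wq)
 *                 goto l_out;
 *
 *         example_cache = kmem_cache_create("example_cache", 64, 0,
 *                                           SLAB_HWCACHE_ALIGN, NULL);
 *         if (!example_cache)
 *                 goto l_destroy_wq;
 *
 *         return 0;
 *
 * l_destroy_wq:
 *         destroy_workqueue(example_wq);
 *         example_wq = NULL;
 * l_out:
 *         return ret;
 * }
 */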
+l_pci_register_driver_failed: + sxe_cli_cdev_unregister(); +l_cli_cdev_register_failed: + kmem_cache_destroy(fnav_cache); + fnav_cache = NULL; + sxe_debugfs_exit(); +l_create_fnav_kmem_failed: + destroy_workqueue(sxe_fnav_workqueue); + sxe_fnav_workqueue = NULL; +l_create_fnav_workque_failed: + destroy_workqueue(sxe_workqueue); + sxe_workqueue = NULL; +l_create_workque_failed: +#ifndef SXE_DRIVER_RELEASE + sxe_log_exit(); +l_end: +#endif + LOG_INFO("sxe module init done, ret=%d\n", ret); + return ret; +} + +struct workqueue_struct *sxe_workqueue_get(void) +{ + return sxe_workqueue; +} + +static void __exit sxe_exit(void) +{ + +#ifdef SXE_TPH_CONFIGURE + dca_unregister_notify(&tph_notifier); +#endif + +#ifndef SXE_DRIVER_RELEASE + sxe_driver_remove_file(); +#endif + +#ifdef SXE_DRIVER_TRACE + sxe_trace_dump_remove_file(); +#endif + + pci_unregister_driver(&sxe_pci_driver); + + sxe_cli_cdev_unregister(); + + sxe_debugfs_exit(); + + if (sxe_workqueue) { + destroy_workqueue(sxe_workqueue); + sxe_workqueue = NULL; + } + + if (sxe_fnav_workqueue) { + destroy_workqueue(sxe_fnav_workqueue); + sxe_fnav_workqueue = NULL; + } + + if (fnav_cache) { + kmem_cache_destroy(fnav_cache); + fnav_cache = NULL; + } + +#ifndef SXE_DRIVER_RELEASE + sxe_log_exit(); +#endif + + return; +} + +MODULE_DEVICE_TABLE(pci, sxe_pci_tbl); +MODULE_INFO(build_time, SXE_BUILD_TIME); +MODULE_INFO(branch, SXE_BRANCH); +MODULE_INFO(commit_id, SXE_COMMIT_ID); +MODULE_DESCRIPTION(SXE_DRV_DESCRIPTION); +MODULE_AUTHOR(SXE_DRV_AUTHOR); +MODULE_VERSION(SXE_VERSION); +MODULE_LICENSE(SXE_DRV_LICENSE); + +module_init(sxe_init); +module_exit(sxe_exit); diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_monitor.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_monitor.c new file mode 100644 index 000000000000..d87fe0db5c31 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_monitor.c @@ -0,0 +1,714 @@ + +#include + +#include "sxe_hw.h" +#include "sxe_monitor.h" +#include "sxe_ptp.h" +#include "sxe_dcb.h" +#include "sxe_netdev.h" +#include "sxe_tx_proc.h" +#include "sxe_rx_proc.h" +#include "sxe_sriov.h" +#include "sxe_errno.h" + +extern struct workqueue_struct *sxe_fnav_workqueue; + +#define SXE_LINK_CHECK_WAIT_TIME (4 * HZ) +#define SXE_SFP_RESET_WAIT_TIME (2 * HZ) + +#define SXE_CHECK_LINK_TIMER_PERIOD (HZ / 10) +#define SXE_NORMAL_TIMER_PERIOD (HZ * 2) + +#ifdef SXE_SFP_DEBUG +static unsigned int sw_sfp_multi_gb_ms = SXE_SW_SFP_MULTI_GB_MS; +#ifndef SXE_TEST +module_param(sw_sfp_multi_gb_ms, uint, 0); +MODULE_PARM_DESC(sw_sfp_multi_gb_ms, "Mask LOS_N interrupt(SDP1) time after active rate switching - default is 4000"); +#endif +#endif + +void sxe_task_timer_trigger(struct sxe_adapter *adapter) +{ + set_bit(SXE_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state); + LOG_DEBUG_BDF("trigger link_check subtask, state=%lx, monitor_state=%lx, is_up=%d\n", + adapter->state, adapter->monitor_ctxt.state, adapter->link.is_up); + + adapter->link.check_timeout = jiffies; + + mod_timer(&adapter->monitor_ctxt.timer, jiffies); + + return; +} + +void sxe_sfp_reset_task_submit(struct sxe_adapter *adapter) +{ + set_bit(SXE_SFP_NEED_RESET, &adapter->monitor_ctxt.state); + LOG_INFO("trigger sfp_reset subtask\n"); + adapter->link.sfp_reset_timeout = 0; + adapter->link.last_lkcfg_time = 0; + adapter->link.sfp_multispeed_time = 0; + + return; +} + +void sxe_monitor_work_schedule(struct sxe_adapter *adapter) +{ + struct workqueue_struct *wq = sxe_workqueue_get(); + + if (!test_bit(SXE_DOWN, &adapter->state) && + !test_bit(SXE_REMOVING, 
&adapter->state) && + !test_and_set_bit(SXE_MONITOR_WORK_SCHED, + &adapter->monitor_ctxt.state)) { + queue_work(wq, &adapter->monitor_ctxt.work); + } + + return; +} + +static void sxe_timer_cb(struct timer_list *timer) +{ + struct sxe_monitor_context *monitor = container_of(timer, + struct sxe_monitor_context, timer); + struct sxe_adapter *adapter = container_of(monitor, struct sxe_adapter, + monitor_ctxt); + unsigned long period ; + + if (test_bit(SXE_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state) || + test_bit(SXE_SFP_MULTI_SPEED_SETTING, &adapter->state)) { + period = SXE_CHECK_LINK_TIMER_PERIOD; + } else { + period = SXE_NORMAL_TIMER_PERIOD; + } + + mod_timer(&adapter->monitor_ctxt.timer, period + jiffies); + + sxe_monitor_work_schedule(adapter); + + return; +} + +static void sxe_monitor_work_complete(struct sxe_adapter *adapter) +{ + BUG_ON(!test_bit(SXE_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state)); + + smp_mb__before_atomic(); + clear_bit(SXE_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state); + + return; +} + +static void sxe_reset_work(struct sxe_adapter *adapter) +{ + if (!test_and_clear_bit(SXE_RESET_REQUESTED, &adapter->monitor_ctxt.state)) { + goto l_end; + } + + rtnl_lock(); + if (test_bit(SXE_DOWN, &adapter->state) || + test_bit(SXE_REMOVING, &adapter->state) || + test_bit(SXE_RESETTING, &adapter->state)) { + goto l_unlock; + } + + LOG_DEV_ERR("reset adapter\n"); + adapter->stats.sw.reset_work_trigger_cnt++; + + sxe_hw_reinit(adapter); + +l_unlock: + rtnl_unlock(); +l_end: + return; +} + +static void sxe_stats_update_work(struct sxe_adapter *adapter) +{ + if (test_bit(SXE_DOWN, &adapter->state) || + test_bit(SXE_REMOVING, &adapter->state) || + test_bit(SXE_RESETTING, &adapter->state)) { + goto l_end; + } + + stats_lock(adapter); + sxe_stats_update(adapter); + stats_unlock(adapter); + +l_end: + return; +} + +static void sxe_check_hang_work(struct sxe_adapter *adapter) +{ + u32 i; + u64 eics = 0; + struct sxe_irq_data *irq_priv; + struct sxe_hw *hw = &adapter->hw; + struct sxe_ring **tx_ring = adapter->tx_ring_ctxt.ring; + struct sxe_ring **xdp_ring = adapter->xdp_ring_ctxt.ring; + + if (test_bit(SXE_DOWN, &adapter->state) || + test_bit(SXE_REMOVING, &adapter->state) || + test_bit(SXE_RESETTING, &adapter->state)) { + goto l_end; + } + + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + SXE_TX_HANG_CHECK_ACTIVE(tx_ring[i]); + } + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + SXE_TX_HANG_CHECK_ACTIVE(xdp_ring[i]); + } + } + + if (!(adapter->cap & SXE_MSIX_ENABLED)) { + hw->irq.ops->event_irq_trigger(hw); + } else { + for (i = 0; i < adapter->irq_ctxt.ring_irq_num; i++) { + irq_priv = adapter->irq_ctxt.irq_data[i]; + if (irq_priv->tx.list.next || + irq_priv->rx.list.next) { + eics |= BIT_ULL(i); + } + } + + hw->irq.ops->ring_irq_trigger(hw, eics); + } + +l_end: + return; +} + +static void sxe_fc_configure(struct sxe_adapter *adapter) +{ + bool pfc_en = adapter->dcb_ctxt.cee_cfg.pfc_mode_enable; + +#ifdef SXE_DCB_CONFIGURE + if (adapter->dcb_ctxt.ieee_pfc) { + pfc_en |= !!(adapter->dcb_ctxt.ieee_pfc->pfc_en); + } +#endif + + if (!((adapter->cap & SXE_DCB_ENABLE) && pfc_en)) { + LOG_DEBUG_BDF("lfc configure\n"); + sxe_fc_enable(adapter); + sxe_rx_drop_mode_set(adapter); + } +#ifdef SXE_DCB_CONFIGURE + else { + LOG_DEBUG_BDF("pfc configure\n"); + sxe_dcb_pfc_configure(adapter); + } +#endif + + return; +} + +static void sxe_vmac_configure(struct sxe_adapter *adapter) +{ + sxe_fc_configure(adapter); + + 
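/*
 * Illustrative sketch, not part of this patch: the monitor path above
 * (sxe_timer_cb() plus sxe_monitor_work_schedule()) is the standard
 * timer-kicks-workqueue split -- the timer callback only re-arms itself
 * and schedules a work item, and everything that may sleep runs in the
 * work handler. A minimal self-contained sketch with hypothetical names.
 *
 * struct example_monitor {
 *         struct timer_list timer;
 *         struct work_struct work;
 * };
 *
 * static void example_work_cb(struct work_struct *work)
 * {
 *         // slow or sleeping operations go here, never in the timer callback
 * }
 *
 * static void example_timer_cb(struct timer_list *t)
 * {
 *         struct example_monitor *mon = from_timer(mon, t, timer);
 *
 *         mod_timer(&mon->timer, jiffies + 2 * HZ);
 *         schedule_work(&mon->work);
 * }
 *
 * static void example_monitor_start(struct example_monitor *mon)
 * {
 *         INIT_WORK(&mon->work, example_work_cb);
 *         timer_setup(&mon->timer, example_timer_cb, 0);
 *         mod_timer(&mon->timer, jiffies);
 * }
 */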
sxe_ptp_configure(adapter); + + return; +} + +static void sxe_link_update(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + unsigned long flags; + + if (!test_bit(SXE_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state)) { + goto l_end; + } + + sxe_link_info_get(adapter, &adapter->link.speed, + &adapter->link.is_up); + + LOG_DEBUG_BDF("link update, speed=%x, is_up=%d\n", + adapter->link.speed, adapter->link.is_up); + + if (adapter->link.is_up) { + sxe_vmac_configure(adapter); + } + + if (adapter->link.is_up || \ + time_after(jiffies, (adapter->link.check_timeout + + SXE_LINK_CHECK_WAIT_TIME))) { + clear_bit(SXE_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state); + + spin_lock_irqsave(&adapter->irq_ctxt.event_irq_lock, flags); + hw->irq.ops->specific_irq_enable(hw, SXE_EIMS_LSC); + spin_unlock_irqrestore(&adapter->irq_ctxt.event_irq_lock, flags); + LOG_DEBUG_BDF("clear link check requester, is_up=%d\n", + adapter->link.is_up); + } + +l_end: + return; +} + +static void sxe_link_up_handle(struct sxe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + const char *speed_str; + + if (netif_carrier_ok(netdev) && + !test_bit(SXE_LINK_SPEED_CHANGE, &(adapter->monitor_ctxt.state))) { + goto l_end; + } + + clear_bit(SXE_LINK_SPEED_CHANGE, &(adapter->monitor_ctxt.state)); + + switch (adapter->link.speed) { + case SXE_LINK_SPEED_10GB_FULL: + speed_str = "10 Gbps"; + break; + case SXE_LINK_SPEED_1GB_FULL: + speed_str = "1 Gbps"; + break; + case SXE_LINK_SPEED_100_FULL: + speed_str = "100 Mbps"; + break; + case SXE_LINK_SPEED_10_FULL: + speed_str = "10 Mbps"; + break; + default: + speed_str = "unknow speed"; + break; + } + LOG_MSG_WARN(drv, "nic link is up, speed: %s\n", speed_str); + + netif_carrier_on(netdev); + + sxe_vf_rate_update(adapter); + + netif_tx_wake_all_queues(adapter->netdev); + + sxe_link_update_notify_vf_all(adapter); +l_end: + return; +} + +static void sxe_link_down_handle(struct sxe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->link.is_up = false; + adapter->link.speed = 0; + + if (netif_carrier_ok(netdev)) { + LOG_MSG_WARN(drv, "nic link is down\n"); + netif_carrier_off(netdev); + sxe_link_update_notify_vf_all(adapter); + } + + if (sxe_tx_ring_pending(adapter) || sxe_vf_tx_pending(adapter)) { + LOG_MSG_WARN(drv, "initiating reset to clear Tx work after link loss\n"); + set_bit(SXE_RESET_REQUESTED, &adapter->monitor_ctxt.state); + } + + return; +} + +static void sxe_detect_link_work(struct sxe_adapter *adapter) +{ + carrier_lock(adapter); + if (test_bit(SXE_DOWN, &adapter->state) || + test_bit(SXE_REMOVING, &adapter->state) || + test_bit(SXE_RESETTING, &adapter->state)) { + carrier_unlock(adapter); + goto l_end; + } + + sxe_link_update(adapter); + + if (adapter->link.is_up) { + sxe_link_up_handle(adapter); + } else { + sxe_link_down_handle(adapter); + } + carrier_unlock(adapter); + + sxe_bad_vf_flr(adapter); + + sxe_spoof_packets_check(adapter); + +l_end: + return; +} + +static s32 sxe_fnav_all_sample_rules_del(struct sxe_adapter *adapter) +{ + struct sxe_fnav_sample_filter *filter; + struct sxe_hw *hw = &adapter->hw; + struct hlist_node *tmp; + int bkt; + + if (!adapter->fnav_ctxt.sample_rules_cnt) { + return 0; + } + + flush_workqueue(sxe_fnav_workqueue); + + spin_lock(&adapter->fnav_ctxt.sample_lock); + hash_for_each_safe(adapter->fnav_ctxt.sample_list, bkt, tmp, filter, hlist) { + hw->dbu.ops->fnav_single_sample_rule_del(hw, filter->hash); + hash_del(&filter->hlist); + kfree(filter); + } + + 
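/*
 * Illustrative sketch, not part of this patch: the teardown loop just
 * above relies on the kernel hashtable helpers -- when entries are freed
 * while iterating, hash_for_each_safe() must be used so the iterator never
 * touches freed nodes, and removal plus free stay under the same lock the
 * writers take. A minimal standalone version with hypothetical types.
 *
 * struct example_filter {
 *         struct hlist_node hlist;
 *         u32 hash;
 * };
 *
 * static DEFINE_HASHTABLE(example_table, 8);
 * static DEFINE_SPINLOCK(example_lock);
 *
 * static void example_flush(void)
 * {
 *         struct example_filter *filter;
 *         struct hlist_node *tmp;
 *         int bkt;
 *
 *         spin_lock(&example_lock);
 *         hash_for_each_safe(example_table, bkt, tmp, filter, hlist) {
 *                 hash_del(&filter->hlist);
 *                 kfree(filter);
 *         }
 *         spin_unlock(&example_lock);
 * }
 */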
adapter->fnav_ctxt.sample_rules_cnt = 0; + spin_unlock(&adapter->fnav_ctxt.sample_lock); + + hw->dbu.ops->fnav_sample_stats_reinit(hw); + + return 0; +} + +#ifdef NEED_BOOTTIME_SECONDS +static inline time64_t ktime_get_boottime_seconds(void) +{ + return ktime_divns(ktime_get_boottime(), NSEC_PER_SEC); +} +#endif + +STATIC void sxe_fnav_sample_reinit_work(struct sxe_adapter *adapter) +{ + u32 i; + struct sxe_hw *hw = &adapter->hw; + unsigned long flags; + + if (adapter->fnav_ctxt.fdir_overflow_time && + (ktime_get_boottime_seconds() - adapter->fnav_ctxt.fdir_overflow_time > 1)) { + adapter->fnav_ctxt.fdir_overflow_time = 0; + adapter->stats.sw.fnav_overflow++; + + if (sxe_fnav_all_sample_rules_del(adapter) == 0) { + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + set_bit(SXE_TX_FNAV_INIT_DONE, + &(adapter->tx_ring_ctxt.ring[i]->state)); + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + set_bit(SXE_TX_FNAV_INIT_DONE, + &adapter->xdp_ring_ctxt.ring[i]->state); + } + + hw->irq.ops->pending_irq_write_clear(hw, SXE_EICR_FLOW_NAV); + + spin_lock_irqsave(&adapter->irq_ctxt.event_irq_lock, flags); + hw->irq.ops->specific_irq_enable(hw, SXE_EIMS_FLOW_NAV); + spin_unlock_irqrestore(&adapter->irq_ctxt.event_irq_lock, flags); + adapter->fnav_ctxt.is_sample_table_overflowed = false; + } else { + LOG_MSG_ERR(probe, "failed to finish FNAV re-initialization, " + "ignored adding FNAV APP_TR filters\n"); + } + + goto l_ret; + } + + if (!test_bit(SXE_FNAV_REQUIRES_REINIT, &adapter->monitor_ctxt.state)) { + LOG_INFO_BDF("fnav not requires reinit\n"); + goto l_ret; + } + + clear_bit(SXE_FNAV_REQUIRES_REINIT, &adapter->monitor_ctxt.state); + + if (test_bit(SXE_DOWN, &adapter->state)) { + LOG_INFO_BDF("sxe state is down no need fnav reinit\n"); + goto l_ret; + } + + if (!(adapter->cap & SXE_FNAV_SAMPLE_ENABLE)) { + LOG_INFO_BDF("only sample fnav mode need reinit\n"); + goto l_ret; + } + + adapter->fnav_ctxt.fdir_overflow_time = ktime_get_boottime_seconds(); + adapter->fnav_ctxt.is_sample_table_overflowed = true; + +l_ret: + LOG_INFO_BDF("fnav reinit finish, and overflow=%llu\n", + adapter->stats.sw.fnav_overflow); + return; +} + +STATIC void sxe_ptp_timer_check(struct sxe_adapter *adapter) +{ + if (test_bit(SXE_PTP_RUNNING, &adapter->state)) { + sxe_ptp_overflow_check(adapter); + if (adapter->cap & SXE_RX_HWTSTAMP_IN_REGISTER) { + sxe_ptp_rx_hang(adapter); + } + + sxe_ptp_tx_hang(adapter); + } + + return; +} + +static s32 sxe_hw_fault_handle_task(struct sxe_adapter *adapter) +{ + s32 ret = 0; + + if (sxe_is_hw_fault(&adapter->hw)) { + if (!test_bit(SXE_DOWN, &adapter->state)) { + rtnl_lock(); + sxe_down(adapter); + rtnl_unlock(); + } + + LOG_ERROR_BDF("sxe nic fault\n"); + ret = -EFAULT; + } + + return ret; +} + +STATIC void sxe_sfp_reset_work(struct sxe_adapter *adapter) +{ + s32 ret; + struct sxe_monitor_context *monitor = &adapter->monitor_ctxt; + + if (!test_bit(SXE_SFP_NEED_RESET, &monitor->state)) { + goto l_end; + } + + if (adapter->link.sfp_reset_timeout && + time_after(adapter->link.sfp_reset_timeout, jiffies)) { + goto l_end; + } + + if (test_and_set_bit(SXE_IN_SFP_INIT, &adapter->state)) { + goto l_end; + } + + adapter->link.sfp_reset_timeout = jiffies + SXE_SFP_RESET_WAIT_TIME - 1; + + ret = adapter->phy_ctxt.ops->identify(adapter); + if (ret) { + LOG_WARN_BDF("monitor identify sfp failed\n"); + goto sfp_out; + } + + if (!test_bit(SXE_SFP_NEED_RESET, &monitor->state)) { + goto sfp_out; + } + + clear_bit(SXE_SFP_NEED_RESET, &monitor->state); + + set_bit(SXE_LINK_NEED_CONFIG, 
&monitor->state); + LOG_MSG_INFO(probe, "SFP+ reset done, trigger link_config subtask\n"); + +sfp_out: + clear_bit(SXE_IN_SFP_INIT, &adapter->state); + + if ((ret == SXE_ERR_SFF_NOT_SUPPORTED) && \ + (adapter->netdev->reg_state == NETREG_REGISTERED)) { + LOG_DEV_ERR("failed to initialize because an unsupported " + "SFP+ module type was detected.\n"); + LOG_DEV_ERR("reload the driver after installing a " + "supported module.\n"); + unregister_netdev(adapter->netdev); + } + +l_end: + return; +} + +STATIC void sxe_sfp_link_config_work(struct sxe_adapter *adapter) +{ + s32 ret; + u32 speed; + bool autoneg; + struct sxe_monitor_context *monitor = &adapter->monitor_ctxt; + + if(time_after(jiffies, adapter->link.sfp_multispeed_time + +#ifdef SXE_SFP_DEBUG + (HZ * sw_sfp_multi_gb_ms) / SXE_HZ_TRANSTO_MS)) { +#else + (HZ * SXE_SW_SFP_MULTI_GB_MS) / SXE_HZ_TRANSTO_MS)) { +#endif + clear_bit(SXE_SFP_MULTI_SPEED_SETTING, &adapter->state); + } + + if (test_and_set_bit(SXE_IN_SFP_INIT, &adapter->state)) { + goto l_sfp_end; + } + + if (!test_bit(SXE_LINK_NEED_CONFIG, &monitor->state)) { + goto l_sfp_uninit; + } + + adapter->phy_ctxt.ops->get_link_capabilities(adapter, &speed, &autoneg); + + ret = sxe_link_configure(adapter, speed); + if (ret) { + LOG_DEV_ERR("link config err, ret=%d, try...\n", ret); + goto l_sfp_uninit; + } + + clear_bit(SXE_LINK_NEED_CONFIG, &monitor->state); + + set_bit(SXE_LINK_CHECK_REQUESTED, &monitor->state); + LOG_DEBUG("link_config subtask done, trigger link_check subtask\n"); + adapter->link.check_timeout = jiffies; + +l_sfp_uninit: + clear_bit(SXE_IN_SFP_INIT, &adapter->state); + +l_sfp_end: + return; +} + +static void sxe_fc_tx_xoff_check(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + u32 i, xoff, dbu_to_mac_stats; + + if ((hw->fc.current_mode != SXE_FC_FULL) && + (hw->fc.current_mode != SXE_FC_RX_PAUSE)) { + goto l_end; + } + + xoff = hw->dbu.ops->tx_dbu_fc_status_get(hw); + dbu_to_mac_stats = hw->stat.ops->tx_dbu_to_mac_stats(hw); + xoff &= SXE_TFCS_PB0_MASK; + + if (!xoff && !dbu_to_mac_stats) { + goto l_end; + } + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + clear_bit(SXE_HANG_CHECK_ARMED, &adapter->tx_ring_ctxt.ring[i]->state); + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + clear_bit(SXE_HANG_CHECK_ARMED, &adapter->xdp_ring_ctxt.ring[i]->state); + } + +l_end: + return; +} + +static void sxe_pfc_tx_xoff_check(struct sxe_adapter *adapter) +{ + u8 tc; + struct sxe_hw *hw = &adapter->hw; + u32 i, data, xoff[SXE_PKG_BUF_NUM_MAX], dbu_to_mac_stats; + + data = hw->dbu.ops->tx_dbu_fc_status_get(hw); + dbu_to_mac_stats = hw->stat.ops->tx_dbu_to_mac_stats(hw); + + for (i = 0; i < SXE_PKG_BUF_NUM_MAX; i++) { + xoff[i] = SXE_TFCS_PB_MASK; + xoff[i] &= data & (SXE_TFCS_PB0_MASK << i); + } + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + tc = adapter->tx_ring_ctxt.ring[i]->tc_idx; + + if (!xoff[tc] && !dbu_to_mac_stats) { + continue; + } else { + clear_bit(SXE_HANG_CHECK_ARMED, &adapter->tx_ring_ctxt.ring[i]->state); + } + + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + tc = adapter->xdp_ring_ctxt.ring[i]->tc_idx; + + if (!xoff[tc] && !dbu_to_mac_stats) { + continue; + } else { + clear_bit(SXE_HANG_CHECK_ARMED, &adapter->xdp_ring_ctxt.ring[i]->state); + } + } + + return; +} + +static void sxe_tx_xoff_check_work(struct sxe_adapter *adapter) +{ + bool pfc_en = adapter->dcb_ctxt.cee_cfg.pfc_mode_enable; + +#ifdef SXE_DCB_CONFIGURE + if (adapter->dcb_ctxt.ieee_pfc) { + pfc_en |= !!(adapter->dcb_ctxt.ieee_pfc->pfc_en); + } 
+#endif + + if (!(adapter->cap & SXE_DCB_ENABLE) || !pfc_en) { + sxe_fc_tx_xoff_check(adapter); + } else { + sxe_pfc_tx_xoff_check(adapter); + } + + return; +} + +void sxe_work_cb(struct work_struct *work) +{ + struct sxe_monitor_context *monitor = container_of(work, + struct sxe_monitor_context, work); + struct sxe_adapter *adapter = container_of(monitor, struct sxe_adapter, + monitor_ctxt); + + if (sxe_hw_fault_handle_task(adapter)) { + goto l_end; + } + + sxe_reset_work(adapter); + sxe_sfp_reset_work(adapter); + sxe_sfp_link_config_work(adapter); + + sxe_detect_link_work(adapter); + + sxe_stats_update_work(adapter); + sxe_tx_xoff_check_work(adapter); + + sxe_fnav_sample_reinit_work(adapter); + + sxe_check_hang_work(adapter); + + sxe_ptp_timer_check(adapter); + +l_end: + sxe_monitor_work_complete(adapter); + return; +} + +static void sxe_hw_fault_task_trigger(void *priv) +{ + struct sxe_adapter *adapter = (struct sxe_adapter *)priv; + + if (test_bit(SXE_MONITOR_WORK_INITED, + &adapter->monitor_ctxt.state)) { + sxe_monitor_work_schedule(adapter); + LOG_ERROR_BDF("sxe nic fault, submit monitor task and " + "perform the down operation\n"); + } + + return; +} + +void sxe_monitor_init(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + + timer_setup(&adapter->monitor_ctxt.timer, sxe_timer_cb, 0); + + INIT_WORK(&adapter->monitor_ctxt.work, sxe_work_cb); + set_bit(SXE_MONITOR_WORK_INITED, &adapter->monitor_ctxt.state); + clear_bit(SXE_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state); + + sxe_hw_fault_handle_init(hw, sxe_hw_fault_task_trigger, adapter); + + mutex_init(&adapter->link.carrier_mutex); + mutex_init(&adapter->stats.stats_mutex); + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_monitor.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_monitor.h new file mode 100644 index 000000000000..e33d7f9891fb --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_monitor.h @@ -0,0 +1,53 @@ +#ifndef __SXE_MONITOR_H__ +#define __SXE_MONITOR_H__ + +#include +#include + +struct sxe_adapter; + +enum sxe_monitor_task_state { + SXE_MONITOR_WORK_INITED, + + SXE_MONITOR_WORK_SCHED, + + SXE_RESET_REQUESTED, + + SXE_LINK_CHECK_REQUESTED, + + SXE_FNAV_REQUIRES_REINIT, + + SXE_SFP_NEED_RESET, + + SXE_LINK_NEED_CONFIG, + + SXE_LINK_SPEED_CHANGE, +}; + +struct sxe_monitor_context { + struct timer_list timer; + struct work_struct work; + unsigned long state; +}; + +struct sxe_link_info { + bool is_up; + u32 speed; + struct mutex carrier_mutex; + + unsigned long check_timeout; + unsigned long sfp_reset_timeout; + unsigned long last_lkcfg_time; + unsigned long sfp_multispeed_time; +}; + +void sxe_monitor_init(struct sxe_adapter *adapter); + +void sxe_monitor_work_schedule(struct sxe_adapter *adapter); + +void sxe_task_timer_trigger(struct sxe_adapter *adapter); + +void sxe_sfp_reset_task_submit(struct sxe_adapter *adapter); + +void sxe_work_cb(struct work_struct *work); +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_netdev.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_netdev.c new file mode 100644 index 000000000000..8ee0cc3ddb5d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_netdev.c @@ -0,0 +1,2029 @@ + +#include +#include +#include +#include +#include +#ifdef SXE_PHY_CONFIGURE +#include +#endif + +#include "sxe_netdev.h" +#include "sxe_rx_proc.h" +#include "sxe_hw.h" +#include "sxe_tx_proc.h" +#include "sxe_log.h" +#include "sxe_irq.h" +#include "sxe_pci.h" +#include "sxe_sriov.h" +#include 
"sxe_ethtool.h" +#include "sxe_filter.h" +#include "sxe_netdev.h" +#include "sxe_ptp.h" +#include "sxe_monitor.h" +#include "sxe_ipsec.h" +#include "sxe_dcb.h" +#include "sxe_xdp.h" +#include "sxe_debug.h" +#include "sxe_host_hdc.h" + +#define SXE_HW_REINIT_SRIOV_DELAY (2000) + +#define SXE_UC_ADDR_DEL_WAIT_MIN (10000) +#define SXE_UC_ADDR_DEL_WAIT_MAX (20000) + +#ifdef HAVE_MACVLAN_OFFLOAD_SUPPORT +void sxe_macvlan_configure(struct sxe_adapter *adapter); +STATIC void sxe_macvlan_offload_reset(struct sxe_adapter *adapter); + +#ifdef HAVE_NO_MACVLAN_DEST_FILTER +static inline bool macvlan_supports_dest_filter(struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PRIVATE || + macvlan->mode == MACVLAN_MODE_VEPA || + macvlan->mode == MACVLAN_MODE_BRIDGE; +} +#endif + +#endif + +void sxe_reset(struct sxe_adapter *adapter) +{ + s32 ret; + struct sxe_hw *hw = &adapter->hw; + + if (sxe_is_hw_fault(hw)) { + goto l_end; + } + + while (test_and_set_bit(SXE_IN_SFP_INIT, &adapter->state)) { + usleep_range(SXE_SFP_INIT_WAIT_ITR_MIN, SXE_SFP_INIT_WAIT_ITR_MAX); + } + + clear_bit(SXE_SFP_NEED_RESET, &adapter->monitor_ctxt.state); + clear_bit(SXE_LINK_NEED_CONFIG, &adapter->monitor_ctxt.state); + + + ret = sxe_hw_reset(adapter); + if (ret < 0) { + LOG_ERROR_BDF("hw init failed, ret=%d\n", ret); + } else { + sxe_hw_start(hw); + } + + clear_bit(SXE_IN_SFP_INIT, &adapter->state); + + __dev_uc_unsync(adapter->netdev, NULL); + sxe_mac_filter_reset(adapter); + + sxe_mac_addr_set(adapter); + + if (test_bit(SXE_PTP_RUNNING, &adapter->state)) { + sxe_ptp_reset(adapter); + } + +l_end: + return; +} + +u32 sxe_sw_mtu_get(struct sxe_adapter *adapter) +{ + u32 max_frame; + + max_frame = adapter->netdev->mtu + SXE_ETH_DEAD_LOAD; + + if (max_frame < (ETH_DATA_LEN + SXE_ETH_DEAD_LOAD)) { + max_frame = (ETH_DATA_LEN + SXE_ETH_DEAD_LOAD); + } + + LOG_INFO_BDF("pf netdev mtu:%u result:%u\n", + adapter->netdev->mtu, + max_frame); + + return max_frame; +} + +s32 sxe_link_config(struct sxe_adapter *adapter) +{ + s32 ret; + bool autoneg; + u32 speed = adapter->phy_ctxt.autoneg_advertised; + + if (!speed) { + adapter->phy_ctxt.ops->get_link_capabilities(adapter, + &speed, &autoneg); + } + + if (adapter->phy_ctxt.ops->link_configure != NULL) { + ret = adapter->phy_ctxt.ops->link_configure(adapter, speed); + if (ret) { + LOG_ERROR_BDF("set link speed failed, ret=%d\n", ret); + goto l_end; + } + } + + ret = adapter->phy_ctxt.ops->reset(adapter); + if (ret) { + LOG_ERROR_BDF("phy reset failed, ret=%d\n", ret); + goto l_end; + } + + LOG_INFO_BDF("speed config seccess, speed=%x\n", speed); + +l_end: + return ret; +} + +static void sxe_txrx_enable(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + + hw->dbu.ops->rx_cap_switch_on(hw); + + return; +} + +static inline void sxe_vt2_configure(struct sxe_adapter *adapter) +{ +#ifdef HAVE_MACVLAN_OFFLOAD_SUPPORT + sxe_macvlan_configure(adapter); +#endif +} + +int sxe_open(struct net_device *netdev) +{ + int ret; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + + if (test_bit(SXE_TESTING, &adapter->state)) { + ret = -EBUSY; + goto l_end; + } + + netif_carrier_off(netdev); + + ret = sxe_host_to_fw_time_sync(adapter); + if (ret) { + LOG_ERROR_BDF("fw time sync failed, fw status err, ret=%d\n", ret); + goto l_end; + } + +#ifdef SXE_DCB_CONFIGURE + sxe_dcb_configure(adapter); +#endif + + sxe_vt1_configure(adapter); + +#ifdef SXE_TPH_CONFIGURE + if (adapter->cap & 
SXE_TPH_CAPABLE) { + sxe_tph_setup(adapter); + } +#endif + + ret = sxe_tx_configure(adapter); + if (ret) { + LOG_ERROR_BDF("tx config failed, ret=%d\n", ret); + goto l_reset; + } + + ret = sxe_rx_configure(adapter); + if (ret) { + LOG_ERROR_BDF("rx config failed, reset and wait for next insmode\n"); + goto l_free_tx; + } + + sxe_vt2_configure(adapter); + +#ifdef SXE_IPSEC_CONFIGURE + sxe_ipsec_table_restore(adapter); +#endif + + ret = sxe_irq_configure(adapter); + if (ret) { + LOG_ERROR_BDF("irq config failed, ret=%d\n", ret); + goto l_free_rx; + } + + sxe_txrx_enable(adapter); + + sxe_task_timer_trigger(adapter); + + hw->setup.ops->pf_rst_done_set(hw); + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + sxe_vf_enable_and_reinit_notify_vf_all(adapter); +#endif + + LOG_INFO_BDF("open success\n"); + return 0; + +l_free_rx: + sxe_rx_release(adapter); +l_free_tx: + sxe_tx_release(adapter); +l_reset: + sxe_reset(adapter); +l_end: + return ret; +} + +static void sxe_netif_disable(struct net_device *netdev) +{ + netif_tx_stop_all_queues(netdev); + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + return; +} + +static void sxe_hw_disable(struct sxe_adapter *adapter) +{ + sxe_hw_rx_disable(adapter); + + sxe_hw_irq_disable(adapter); + + sxe_hw_tx_disable(adapter); + + return; +} + +static void sxe_txrx_ring_clean(struct sxe_adapter *adapter) +{ + u32 i; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + sxe_tx_ring_buffer_clean(adapter->tx_ring_ctxt.ring[i]); + } + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + sxe_tx_ring_buffer_clean(adapter->xdp_ring_ctxt.ring[i]); + } + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + sxe_rx_ring_buffer_clean(adapter->rx_ring_ctxt.ring[i]); + } + + return ; +} + +STATIC void sxe_resource_release(struct sxe_adapter *adapter) +{ + sxe_irq_release(adapter); + + sxe_rx_release(adapter); + + sxe_tx_release(adapter); + + return; +} + +static void sxe_txrx_disable(struct sxe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + sxe_netif_disable(netdev); + + sxe_hw_disable(adapter); + + sxe_napi_disable(adapter); + + return ; +} + +void sxe_down(struct sxe_adapter *adapter) +{ + LOG_INFO_BDF("down start\n"); + carrier_lock(adapter); + if (test_and_set_bit(SXE_DOWN, &adapter->state)) { + carrier_unlock(adapter); + goto l_end; + } + + sxe_txrx_disable(adapter); + carrier_unlock(adapter); + + clear_bit(SXE_RESET_REQUESTED, &adapter->monitor_ctxt.state); + clear_bit(SXE_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state); + clear_bit(SXE_FNAV_REQUIRES_REINIT, &adapter->monitor_ctxt.state); + + del_timer_sync(&adapter->monitor_ctxt.timer); + sxe_vf_down(adapter); + + if (!pci_channel_offline(adapter->pdev)) { + sxe_reset(adapter); + } + + if (adapter->phy_ctxt.ops->sfp_tx_laser_disable) { + adapter->phy_ctxt.ops->sfp_tx_laser_disable(adapter); + } + + sxe_txrx_ring_clean(adapter); +l_end: + LOG_INFO_BDF("down finish\n"); + return; +} + +static void sxe_fuc_resource_release(struct sxe_adapter *adapter) +{ + sxe_ptp_suspend(adapter); + + return ; +} + +void sxe_terminate(struct sxe_adapter *adapter) +{ + sxe_fuc_resource_release(adapter); + + sxe_down(adapter); + + sxe_resource_release(adapter); + + return ; +} + +void sxe_up(struct sxe_adapter *adapter) +{ +#ifdef SXE_DCB_CONFIGURE + sxe_dcb_configure(adapter); +#endif + + sxe_vt1_configure(adapter); + +#ifdef SXE_IPSEC_CONFIGURE + sxe_ipsec_table_restore(adapter); +#endif + +#ifdef SXE_TPH_CONFIGURE + if (adapter->cap & SXE_TPH_CAPABLE) { + sxe_tph_setup(adapter); + } +#endif + + 
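/* Bring-up ordering follows sxe_open(): rings are programmed before
+	 * interrupts, interrupts before the Rx enable, and the monitor timer
+	 * is started only once traffic can flow. pf_rst_done_set() and the
+	 * VF notification come last, presumably so that VFs do not resume
+	 * against a half-configured PF.
+	 */
+	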
sxe_hw_tx_configure(adapter); + + sxe_hw_rx_configure(adapter); + + sxe_vt2_configure(adapter); + + sxe_hw_irq_configure(adapter); + + sxe_txrx_enable(adapter); + + sxe_task_timer_trigger(adapter); + + adapter->hw.setup.ops->pf_rst_done_set(&adapter->hw); + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + sxe_vf_enable_and_reinit_notify_vf_all(adapter); +#endif + + LOG_INFO_BDF("up finish\n"); + return ; +} + +void sxe_hw_reinit(struct sxe_adapter *adapter) +{ + WARN_ON(in_interrupt()); + + netif_trans_update(adapter->netdev); + + while (test_and_set_bit(SXE_RESETTING, &adapter->state)) { + usleep_range(1000, 2000); + } + + sxe_down(adapter); + + if (adapter->cap & SXE_SRIOV_ENABLE) { + msleep(SXE_HW_REINIT_SRIOV_DELAY); + } + + sxe_up(adapter); + clear_bit(SXE_RESETTING, &adapter->state); + + LOG_INFO_BDF("reinit finish\n"); + return; +} + +void sxe_do_reset(struct net_device *netdev) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + sxe_hw_reinit(adapter); + } else { + sxe_reset(adapter); + } + + return; +} + +int sxe_close(struct net_device *netdev) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + sxe_ptp_stop(adapter); + + sxe_terminate(adapter); + + sxe_fnav_rules_clean(adapter); + + LOG_INFO_BDF("close finish\n"); + return 0; +} + +static void sxe_vlan_strip_enable(struct sxe_adapter *adapter) +{ + u32 i, j; + struct sxe_hw *hw = &adapter->hw; + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + struct sxe_ring *ring = adapter->rx_ring_ctxt.ring[i]; + + if(!netif_is_sxe(ring->netdev)) { + continue; + } + + j = ring->reg_idx; + hw->dma.ops->vlan_tag_strip_switch(hw, j, true); + } + + return; +} + +static void sxe_vlan_strip_disable(struct sxe_adapter *adapter) +{ + u32 i, j; + struct sxe_hw *hw = &adapter->hw; + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + struct sxe_ring *ring = adapter->rx_ring_ctxt.ring[i]; + + j = ring->reg_idx; + hw->dma.ops->vlan_tag_strip_switch(hw, j, false); + } + + return; +} + +static void sxe_refill_vfta(struct sxe_adapter *adapter, u32 vfta_offset) +{ + u32 i, vid, word, bits; + struct sxe_hw *hw = &adapter->hw; + u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; + u32 vid_start = vfta_offset * VF_BLOCK_BITS; + u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * VF_BLOCK_BITS); + + for (i = SXE_VLVF_ENTRIES; --i;) { + u32 vlvf = hw->filter.vlan.ops->pool_filter_read(hw, i); + + vid = vlvf & VLAN_VID_MASK; + + if (vid < vid_start || vid >= vid_end) { + continue; + } + + if (vlvf) { + vfta[(vid - vid_start) / VF_BLOCK_BITS] |= BIT(vid % VF_BLOCK_BITS); + + if (test_bit(vid, adapter->vlan_ctxt.active_vlans)) + continue; + } + + word = i * 2 + PF_POOL_INDEX(0) / VF_BLOCK_BITS; + + bits = ~BIT(PF_POOL_INDEX(0) % VF_BLOCK_BITS); + bits &= hw->filter.vlan.ops->pool_filter_bitmap_read(hw, word); + hw->filter.vlan.ops->pool_filter_bitmap_write(hw, word, bits); + } + + for (i = VFTA_BLOCK_SIZE; i--;) { + vid = (vfta_offset + i) * VF_BLOCK_BITS; + word = vid / BITS_PER_LONG; + bits = vid % BITS_PER_LONG; + + vfta[i] |= adapter->vlan_ctxt.active_vlans[word] >> bits; + hw->filter.vlan.ops->filter_array_write(hw, vfta_offset + i, vfta[i]); + } + + return; +} + +static void sxe_vlan_promisc_disable(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + + hw->filter.vlan.ops->filter_switch(hw, true); + return; +} + +static void sxe_vf_vlan_promisc_disable(struct sxe_adapter *adapter) +{ + u32 i; + + if (!(adapter->cap & SXE_VLAN_PROMISC)) { + goto l_end; + } + + adapter->cap &= ~SXE_VLAN_PROMISC; + + for (i = 0; i < 
adapter->vlan_ctxt.vlan_table_size; i += VFTA_BLOCK_SIZE) { + sxe_refill_vfta(adapter, i); + } + +l_end: + return ; +} + +static void sxe_vlan_promisc_enable(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + + hw->filter.vlan.ops->filter_switch(hw, false); + return; +} + +static void sxe_vf_vlan_promisc_enable(struct sxe_adapter *adapter) +{ + u32 i; + struct sxe_hw *hw = &adapter->hw; + + hw->filter.vlan.ops->filter_switch(hw, true); + + if (adapter->cap & SXE_VLAN_PROMISC) { + goto l_end; + } + + adapter->cap |= SXE_VLAN_PROMISC; + + for (i = SXE_VLVF_ENTRIES; --i;) { + u32 reg_offset = i * 2 + PF_POOL_INDEX(0) / VF_BLOCK_BITS; + u32 vlvfb = hw->filter.vlan.ops->pool_filter_bitmap_read(hw, reg_offset); + + vlvfb |= BIT(PF_POOL_INDEX(0) % VF_BLOCK_BITS); + hw->filter.vlan.ops->pool_filter_bitmap_write(hw, reg_offset, vlvfb); + } + + for (i = adapter->vlan_ctxt.vlan_table_size; i--;) { + hw->filter.vlan.ops->filter_array_write(hw, i, ~0U); + } + +l_end: + return; +} + +static void sxe_set_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + LOG_DEBUG_BDF("netdev[%p]'s vlan strip %s\n", netdev, + (features & NETIF_F_HW_VLAN_CTAG_RX) ? "enabled" : "disabled"); + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + sxe_vlan_strip_enable(adapter); + } else { + sxe_vlan_strip_disable(adapter); + } + + LOG_DEBUG_BDF("netdev[%p]'s pf vlan promisc %s\n", netdev, + (features & NETIF_F_HW_VLAN_CTAG_FILTER) ? "disabled" : "enabled"); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { + sxe_vlan_promisc_disable(adapter); + + if (adapter->cap & SXE_MACVLAN_ENABLE) { + LOG_DEBUG_BDF("netdev[%p]'s vf vlan promisc disabled\n", netdev); + sxe_vf_vlan_promisc_disable(adapter); + } + } else { + sxe_vlan_promisc_enable(adapter); + + if (adapter->cap & SXE_MACVLAN_ENABLE) { + LOG_DEBUG_BDF("netdev[%p]'s vf vlan promisc enabled\n", netdev); + sxe_vf_vlan_promisc_enable(adapter); + } + } + + return; +} + +s32 sxe_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + + LOG_INFO_BDF("netdev[%p] add vlan: proto[%u], vid[%u]\n", netdev, proto, vid); + if (!vid || !(adapter->cap & SXE_VLAN_PROMISC)) { + hw->filter.vlan.ops->filter_configure(hw, vid, PF_POOL_INDEX(0), true, !!vid); + } + + set_bit(vid, adapter->vlan_ctxt.active_vlans); + + return 0; +} + +static s32 sxe_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + + LOG_INFO_BDF("kill vlan: proto[%u], vid[%u]\n", proto, vid); + if (vid && !(adapter->cap & SXE_VLAN_PROMISC)) { + hw->filter.vlan.ops->filter_configure(hw, vid, PF_POOL_INDEX(0), false, true); + } + + clear_bit(vid, adapter->vlan_ctxt.active_vlans); + + return 0; +} + +void __sxe_set_rx_mode(struct net_device *netdev, bool lock) +{ + u32 flt_ctrl; + s32 count; + u32 vmolr = SXE_VMOLR_BAM | SXE_VMOLR_AUPE; + netdev_features_t features = netdev->features; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + unsigned long flags; + + flt_ctrl = hw->filter.mac.ops->rx_mode_get(hw); + LOG_DEBUG_BDF("read flt_ctrl=0x%x\n", flt_ctrl); + + flt_ctrl &= ~SXE_FCTRL_SBP; + flt_ctrl |= SXE_FCTRL_BAM; + + flt_ctrl &= ~(SXE_FCTRL_UPE | SXE_FCTRL_MPE); + if (netdev->flags & IFF_PROMISC) { + flt_ctrl |= (SXE_FCTRL_UPE | SXE_FCTRL_MPE); + vmolr |= SXE_VMOLR_MPE; + features &= 
~NETIF_F_HW_VLAN_CTAG_FILTER; + LOG_INFO_BDF("both unicast promisc and multicast promisc enabled." + "flags:0x%x flt_ctrl:0x%x vmolr:0x%x features:0x%llx\n", + netdev->flags, flt_ctrl, vmolr, features); + } else { + if (netdev->flags & IFF_ALLMULTI) { + flt_ctrl |= SXE_FCTRL_MPE; + vmolr |= SXE_VMOLR_MPE; + LOG_INFO_BDF("unicast promisc enabled." + "flags:0x%x flt_ctrl:0x%x vmolr:0x%x features:0x%llx\n", + netdev->flags, flt_ctrl, vmolr, features); + } + } + + if (features & NETIF_F_RXALL) { + flt_ctrl |= (SXE_FCTRL_SBP | + SXE_FCTRL_BAM ); + + } + + if (__dev_uc_sync(netdev, sxe_uc_sync, sxe_uc_unsync)) { + flt_ctrl |= SXE_FCTRL_UPE; + vmolr |= SXE_VMOLR_ROPE; + LOG_ERROR_BDF("uc addr sync fail, enable unicast promisc." + "flags:0x%x flt_ctrl:0x%x vmolr:0x%x features:0x%llx\n", + netdev->flags, flt_ctrl, vmolr, features); + } + + if (lock) { + spin_lock_irqsave(&adapter->vt_ctxt.vfs_lock, flags); + count = sxe_mc_addr_add(netdev); + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); + } else { + count = sxe_mc_addr_add(netdev); + } + + if (count < 0) { + flt_ctrl |= SXE_FCTRL_MPE; + vmolr |= SXE_VMOLR_MPE; + LOG_ERROR_BDF("mc addr add fail count:%d, enable multicast promisc." + "flags:0x%x flt_ctrl:0x%x vmolr:0x%x features:0x%llx\n", + count, netdev->flags, flt_ctrl, vmolr, features); + } else if (count) { + vmolr |= SXE_VMOLR_ROMPE; + } + + vmolr |= hw->filter.mac.ops->pool_rx_mode_get(hw, PF_POOL_INDEX(0)) & + (~(SXE_VMOLR_MPE | SXE_VMOLR_ROMPE | SXE_VMOLR_ROPE)); + + hw->filter.mac.ops->pool_rx_mode_set(hw, vmolr, PF_POOL_INDEX(0)); + + LOG_DEBUG_BDF("write flt_ctrl=0x%x\n", flt_ctrl); + hw->filter.mac.ops->rx_mode_set(hw, flt_ctrl); + + sxe_set_vlan_mode(netdev, features); + + return; +} + +void sxe_set_rx_mode(struct net_device *netdev) +{ + __sxe_set_rx_mode(netdev, true); +} + +static int sxe_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + s32 ret = 0; + +#ifdef HAVE_XDP_SUPPORT + u32 new_frame_size; + if (adapter->xdp_prog) { + new_frame_size = new_mtu + SXE_ETH_DEAD_LOAD; + if (new_frame_size > sxe_max_xdp_frame_size(adapter)) { + LOG_MSG_WARN(probe, "requested mtu size is not" + "supported with xdp\n"); + ret = -EINVAL; + goto l_ret; + } + } +#endif + + LOG_MSG_INFO(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + netdev->mtu = new_mtu; + + if (netif_running(netdev)) { + sxe_hw_reinit(adapter); + } + +#ifdef HAVE_XDP_SUPPORT +l_ret: +#endif + return ret; +} + +static int sxe_set_features(struct net_device *netdev, + netdev_features_t features) +{ + bool need_reset = false; + struct sxe_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = netdev->features ^ features; + + if (!(features & NETIF_F_LRO)) { + if (adapter->cap & SXE_LRO_ENABLE) { + need_reset = true; + } + adapter->cap &= ~SXE_LRO_ENABLE; + LOG_DEBUG_BDF("lro disabled and need_reset is %s\n", + need_reset ? 
"true" : "false"); + } else if ((adapter->cap & SXE_LRO_CAPABLE) && + !(adapter->cap & SXE_LRO_ENABLE)) { + if (adapter->irq_ctxt.rx_irq_interval == 1 || + adapter->irq_ctxt.rx_irq_interval > SXE_MIN_LRO_ITR) { + adapter->cap |= SXE_LRO_ENABLE; + need_reset = true; + LOG_DEBUG_BDF("lro enabled and need reset," + " rx_irq_throttle=%u\n", + adapter->irq_ctxt.rx_irq_interval); + } else if ((changed ^ features) & NETIF_F_LRO) { + LOG_MSG_WARN(probe, "irq interval set too low, lro can not process" + "disabling LRO\n"); + } + } + + if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) { + if (!(adapter->cap & SXE_FNAV_SPECIFIC_ENABLE)) { + need_reset = true; + } + + adapter->cap &= ~SXE_FNAV_SAMPLE_ENABLE; + adapter->cap |= SXE_FNAV_SPECIFIC_ENABLE; + LOG_DEBUG_BDF("switch to specific mode and need_reset is %s\n", + need_reset ? "true" : "false"); + } else { + if (adapter->cap & SXE_FNAV_SPECIFIC_ENABLE) { + need_reset = true; + } + + adapter->cap &= ~SXE_FNAV_SPECIFIC_ENABLE; + LOG_DEBUG_BDF("switch off specific mode and need_reset is %s\n", + need_reset ? "true" : "false"); + + if ((adapter->cap & SXE_SRIOV_ENABLE) || + (sxe_dcb_tc_get(adapter) > 1) || + (adapter->ring_f.rss_limit <= 1)) { + LOG_DEBUG_BDF("can not switch to sample mode. vt_mode=%s," + " tcs=%u, rss_limit=%u\n", + (adapter->cap & SXE_SRIOV_ENABLE) ? "on" : "off", + sxe_dcb_tc_get(adapter), adapter->ring_f.rss_limit); + } else { + adapter->cap |= SXE_FNAV_SAMPLE_ENABLE; + LOG_DEBUG_BDF("switch to sample mode and need_reset is %s\n", + need_reset ? "true" : "false"); + } + } + + if (changed & NETIF_F_RXALL) { + need_reset = true; + } + + netdev->features = features; + +#ifdef HAVE_MACVLAN_OFFLOAD_SUPPORT + if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && + adapter->pool_f.pf_num_used > 1) { + sxe_macvlan_offload_reset(adapter); + } else if (need_reset) { +#else + if (need_reset) { +#endif + sxe_do_reset(netdev); + } else if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + __sxe_set_rx_mode(netdev, true); + } + + return 1; +} + +static netdev_features_t sxe_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + if (!(features & NETIF_F_RXCSUM)) { + LOG_DEBUG_BDF("netif rxcsum off, and lro need off too"); + features &= ~NETIF_F_LRO; + } + + if (!(adapter->cap & SXE_LRO_CAPABLE)) { + LOG_DEBUG_BDF("sxe capacity not support lro, turn lro off"); + features &= ~NETIF_F_LRO; + } + + if (adapter->xdp_prog && (features & NETIF_F_LRO)) { + LOG_DEV_ERR("lro is not supported with xdp\n"); + features &= ~NETIF_F_LRO; + } + + return features; +} + +static netdev_features_t sxe_features_check(struct sk_buff *skb, + struct net_device *dev, netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + netdev_features_t changed_features = features; + + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > SXE_MAX_MAC_HDR_LEN)) { + LOG_DEBUG("mac_hdr_len=%u > %u\n", + mac_hdr_len, SXE_MAX_MAC_HDR_LEN); + SKB_DUMP(skb); + changed_features = (features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6)); + goto l_ret; + } + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > SXE_MAX_NETWORK_HDR_LEN)) { + LOG_DEBUG("network_hdr_len=%u > %u\n", + network_hdr_len, SXE_MAX_NETWORK_HDR_LEN); + SKB_DUMP(skb); + changed_features = (features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6)); + goto l_ret; + } + + if 
(skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { +#ifdef SXE_IPSEC_CONFIGURE + if (!secpath_exists(skb)) +#endif + changed_features = features & ~NETIF_F_TSO; + } + +l_ret: + return changed_features; +} + +static void sxe_rx_stats_update(struct sxe_adapter *adapter) +{ + u32 i; + struct sxe_ring *rx_ring; + struct net_device *netdev = adapter->netdev; + struct sxe_sw_stats *sw_stats = &adapter->stats.sw; + u64 alloc_rx_page = 0, lro_count = 0, lro_flush = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 non_eop_descs = 0, bytes = 0, packets = 0, hw_csum_rx_error = 0; + + if (adapter->cap & SXE_LRO_ENABLE) { + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + rx_ring = READ_ONCE(adapter->rx_ring_ctxt.ring[i]); + if (!rx_ring) { + continue; + } + + lro_count += rx_ring->rx_stats.lro_count; + lro_flush += rx_ring->rx_stats.lro_flush; + } + sw_stats->lro_total_count = lro_count; + sw_stats->lro_total_flush = lro_flush; + } + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + rx_ring = READ_ONCE(adapter->rx_ring_ctxt.ring[i]); + if (!rx_ring) { + continue; + } + + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + } + + sw_stats->non_eop_descs = non_eop_descs; + sw_stats->alloc_rx_page = alloc_rx_page; + sw_stats->alloc_rx_page_failed = alloc_rx_page_failed; + sw_stats->alloc_rx_buff_failed = alloc_rx_buff_failed; + sw_stats->hw_csum_rx_error = hw_csum_rx_error; + netdev->stats.rx_bytes = bytes; + netdev->stats.rx_packets = packets; + + return; +} + +static void sxe_tx_stats_update(struct sxe_adapter *adapter) +{ + u32 i; + struct sxe_ring *tx_ring; + struct sxe_ring *xdp_ring; + struct net_device *netdev = adapter->netdev; + struct sxe_sw_stats *sw_stats = &adapter->stats.sw; + u64 bytes = 0, packets = 0, restart_queue = 0, tx_busy = 0; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + tx_ring = adapter->tx_ring_ctxt.ring[i]; + if (!tx_ring) { + continue; + } + + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + xdp_ring = adapter->xdp_ring_ctxt.ring[i]; + if (!xdp_ring) { + continue; + } + + restart_queue += xdp_ring->tx_stats.restart_queue; + tx_busy += xdp_ring->tx_stats.tx_busy; + bytes += xdp_ring->stats.bytes; + packets += xdp_ring->stats.packets; + } + sw_stats->restart_queue = restart_queue; + sw_stats->tx_busy = tx_busy; + netdev->stats.tx_bytes = bytes; + netdev->stats.tx_packets = packets; + + return; +} + +static void sxe_hw_stats_update(struct sxe_adapter *adapter) +{ + u32 i; + u64 total_mpc = 0; + struct sxe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct sxe_mac_stats *hw_stats = &adapter->stats.hw; + + hw->stat.ops->stats_get(hw, hw_stats); + + netdev->stats.multicast = hw_stats->mprc; + + netdev->stats.rx_errors = hw_stats->crcerrs + hw_stats->rlec; + netdev->stats.rx_dropped = 0; + netdev->stats.rx_length_errors = hw_stats->rlec; + netdev->stats.rx_crc_errors = hw_stats->crcerrs; + + for (i = 0; i < 8; i++) { + total_mpc += hw_stats->dburxdrofpcnt[i]; + } + netdev->stats.rx_missed_errors = total_mpc; + + return; +} + +void 
sxe_stats_update(struct sxe_adapter *adapter) +{ + if (test_bit(SXE_DOWN, &adapter->state) || + test_bit(SXE_RESETTING, &adapter->state)) { + goto l_end; + } + + sxe_rx_stats_update(adapter); + + sxe_tx_stats_update(adapter); + + sxe_hw_stats_update(adapter); + +l_end: + return; +} + +static void sxe_ring_stats64_get(struct rtnl_link_stats64 *stats, + struct sxe_ring *ring, + bool is_rx) +{ + u32 start; + u64 bytes, packets; + + if (ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + if (true == is_rx) { + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } else { + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + + return; +} + +#ifdef NO_VOID_NDO_GET_STATS64 +static struct rtnl_link_stats64 * +sxe_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +#else +static void sxe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif +{ + u32 i; + struct sxe_ring *ring; + struct sxe_adapter *adapter = netdev_priv(netdev); + + rcu_read_lock(); + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + ring = READ_ONCE(adapter->rx_ring_ctxt.ring[i]); + sxe_ring_stats64_get(stats, ring, true); + } + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + ring = READ_ONCE(adapter->tx_ring_ctxt.ring[i]); + sxe_ring_stats64_get(stats, ring, false); + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + ring = READ_ONCE(adapter->xdp_ring_ctxt.ring[i]); + sxe_ring_stats64_get(stats, ring, false); + } + rcu_read_unlock(); + + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + +#ifdef NO_VOID_NDO_GET_STATS64 + return stats; +#endif +} + +static int sxe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ + int ret; + struct sxe_adapter *adapter = netdev_priv(netdev); + + switch (cmd) { + case SIOCSHWTSTAMP: + ret = sxe_ptp_hw_tstamp_config_set(adapter, req); + break; + case SIOCGHWTSTAMP: + ret = sxe_ptp_hw_tstamp_config_get(adapter, req); + break; + + default: +#ifdef SXE_PHY_CONFIGURE + ret = mdio_mii_ioctl(&adapter->phy_ctxt.mdio, if_mii(req), cmd); +#else + ret = -EOPNOTSUPP; +#endif + } + + return ret; +} + +#ifdef HAVE_MACVLAN_OFFLOAD_SUPPORT + +#ifdef NEED_SET_MACVLAN_MODE +static void sxe_macvlan_set_rx_mode (struct net_device *dev, unsigned int pool, + struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + u32 vmolr; + + vmolr = hw->filter.mac.ops->pool_rx_mode_get(hw, SXE_VMOLR(pool)); + vmolr |= (SXE_VMOLR_ROMPE | SXE_VMOLR_BAM | SXE_VMOLR_AUPE); + + vmolr &= ~SXE_VMOLR_MPE; + + if (dev->flags & IFF_ALLMULTI) { + vmolr |= SXE_VMOLR_MPE; + } else { + vmolr |= SXE_VMOLR_ROMPE; + } + + hw->filter.mac.ops->pool_rx_mode_set(hw, vmolr, pool); +} +#endif + +static int sxe_macvlan_ring_configure(struct sxe_adapter *adapter, + struct sxe_macvlan *accel) +{ + s32 ret; + u32 i, baseq; + struct net_device *vdev = accel->netdev; +#ifndef HAVE_NO_SB_BIND_CHANNEL + u16 rss_i = sxe_rss_num_get(adapter); + int num_tc = netdev_get_num_tc(adapter->netdev); +#endif + + baseq = accel->pool * adapter->ring_f.ring_per_pool; + LOG_DEV_DEBUG("pool %i:%i queues %i:%i\n", + accel->pool, adapter->pool_f.pf_num_used, + baseq, baseq + adapter->ring_f.ring_per_pool); + + 
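/* Each offloaded macvlan owns one pool worth of queues starting at
+	 * pool * ring_per_pool; e.g. with 4 rings per pool, pool 2 maps to
+	 * rings 8-11 for both the Rx and Tx offsets recorded below.
+	 */
+	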
accel->rx_ring_offset = baseq; + accel->tx_ring_offset = baseq; + +#ifndef HAVE_NO_SB_BIND_CHANNEL + for (i = 0; i < num_tc; i++) { + netdev_bind_sb_channel_queue(adapter->netdev, vdev, + i, rss_i, baseq + (rss_i * i)); + } +#endif + + for (i = 0; i < adapter->ring_f.ring_per_pool; i++) { + adapter->rx_ring_ctxt.ring[baseq + i]->netdev = vdev; + } + + wmb(); + + ret = sxe_uc_addr_add(&adapter->hw, + adapter->mac_filter_ctxt.uc_addr_table, + vdev->dev_addr, PF_POOL_INDEX(accel->pool)); + if (ret >= 0) { +#ifdef NEED_SET_MACVLAN_MODE + sxe_macvlan_set_rx_mode(vdev, accel->pool, adapter); +#endif + goto l_end; + } + +#ifndef HAVE_NO_MACVLAN_RELEASE + macvlan_release_l2fw_offload(vdev); +#endif + + for (i = 0; i < adapter->ring_f.ring_per_pool; i++) { + adapter->rx_ring_ctxt.ring[baseq + i]->netdev = NULL; + } + + LOG_DEV_ERR("l2fw offload disabled due to L2 filter error\n"); + +#ifndef HAVE_NO_SB_BIND_CHANNEL + netdev_unbind_sb_channel(adapter->netdev, vdev); + netdev_set_sb_channel(vdev, 0); +#endif + + clear_bit(accel->pool, adapter->vt_ctxt.pf_pool_bitmap); + kfree(accel); + +l_end: + return ret; +} + +#ifndef NO_NEED_POOL_DEFRAG +#ifdef HAVE_NETDEV_NESTED_PRIV +STATIC int sxe_macvlan_pool_reassign(struct net_device *vdev, + struct netdev_nested_priv *priv) +{ + struct sxe_adapter *adapter = (struct sxe_adapter *)priv->data; +#else +STATIC int sxe_macvlan_pool_reassign(struct net_device *vdev, void *data) +{ + struct sxe_adapter *adapter = data; +#endif + u32 pool; + struct sxe_macvlan *accel; + + if (!netif_is_macvlan(vdev)) { + goto l_end; + } + + accel = macvlan_accel_priv(vdev); + if (!accel) { + goto l_end; + } + + pool = find_first_zero_bit(adapter->vt_ctxt.pf_pool_bitmap, + adapter->pool_f.pf_num_used); + LOG_INFO_BDF("free pool=%u, pf pool used=%u\n", + pool, adapter->pool_f.pf_num_used); + if (pool < adapter->pool_f.pf_num_used) { + set_bit(pool, adapter->vt_ctxt.pf_pool_bitmap); + accel->pool = pool; + goto l_end; + } + + LOG_DEV_ERR("l2fw offload disabled due to lack of queue resources\n"); + macvlan_release_l2fw_offload(vdev); + netdev_unbind_sb_channel(adapter->netdev, vdev); + netdev_set_sb_channel(vdev, 0); + + kfree(accel); + +l_end: + return 0; +} + +void sxe_macvlan_pools_defrag(struct net_device *dev) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + +#ifdef HAVE_NETDEV_NESTED_PRIV + struct netdev_nested_priv priv = { + .data = (void *)adapter, + }; + bitmap_clear(adapter->vt_ctxt.pf_pool_bitmap, 1, 63); + netdev_walk_all_upper_dev_rcu(dev, sxe_macvlan_pool_reassign, &priv); +#else + bitmap_clear(adapter->vt_ctxt.pf_pool_bitmap, 1, 63); + netdev_walk_all_upper_dev_rcu(dev, sxe_macvlan_pool_reassign, adapter); +#endif + return; +} +#endif + +static s32 sxe_macvlan_pools_assign(struct sxe_adapter *adapter) +{ + s32 ret; + u32 pool; + u16 assigned_pools, total_pools; + u8 tcs = sxe_dcb_tc_get(adapter) ? 
: 1; + u16 *pf_pools = &adapter->pool_f.pf_num_used; + + pool = find_first_zero_bit(adapter->vt_ctxt.pf_pool_bitmap, *pf_pools); + if (pool == adapter->pool_f.pf_num_used) { + total_pools = adapter->vt_ctxt.num_vfs + *pf_pools; + + if (((adapter->cap & SXE_DCB_ENABLE) && + *pf_pools >= (SXE_TXRX_RING_NUM_MAX / tcs)) || + *pf_pools > SXE_MAX_MACVLANS) { + LOG_ERROR_BDF("macvlan pool exceed the limit, cap=%x, pf_pool_num=%u\n", + adapter->cap, *pf_pools); + ret = -EBUSY; + goto l_end; + } + + if (total_pools >= SXE_POOLS_NUM_MAX) { + LOG_ERROR_BDF("pool num exceed the limit, total_pool_num=%u\n", + total_pools); + ret = -EBUSY; + goto l_end; + } + + adapter->cap |= SXE_MACVLAN_ENABLE | + SXE_SRIOV_ENABLE; + + if (total_pools < SXE_32_POOL && *pf_pools < SXE_16_POOL) { + assigned_pools = min_t(u16, + SXE_32_POOL - total_pools, + SXE_16_POOL - *pf_pools); + LOG_INFO_BDF("reserved %u pool to macvlan, 4 ring\n", assigned_pools); + } else if (*pf_pools < SXE_32_POOL) { + assigned_pools = min_t(u16, + SXE_POOLS_NUM_MAX - total_pools, + SXE_32_POOL - *pf_pools); + LOG_INFO_BDF("reserved %u pool to macvlan, 2 ring\n", assigned_pools); + } else { + assigned_pools = SXE_POOLS_NUM_MAX - total_pools; + LOG_INFO_BDF("reserved %u pool to macvlan, 1 ring\n", assigned_pools); + } + + if (!assigned_pools) { + LOG_ERROR_BDF("no remaining pool\n"); + ret = -EBUSY; + goto l_end; + } + + adapter->pool_f.pf_num_limit += assigned_pools; + + ret = sxe_ring_reassign(adapter, sxe_dcb_tc_get(adapter)); + if (ret) { + LOG_ERROR_BDF("ring reassign failed, ret=%d\n", ret); + goto l_end; + } + + if (pool >= *pf_pools) { + ret = -ENOMEM; + goto l_end; + } + } + + return 0; + +l_end: + return ret; +} + +STATIC void sxe_macvlan_offload_reset(struct sxe_adapter *adapter) +{ + s32 ret; + u32 rss = min_t(u32, SXE_RSS_RING_NUM_MAX, num_online_cpus()); + + if (!adapter->pool_f.vf_num_used) { + LOG_DEBUG_BDF("dont enable vf , adpater->cap=%x\n", adapter->cap); + adapter->cap &= ~(SXE_MACVLAN_ENABLE | SXE_SRIOV_ENABLE); + } + + LOG_WARN_BDF("macvlan off: go back to rss mode\n"); + adapter->ring_f.rss_limit = rss; + adapter->pool_f.pf_num_limit = 1; + ret = sxe_ring_reassign(adapter, sxe_dcb_tc_get(adapter)); + if (ret) { + LOG_ERROR_BDF("ring reassign failed, ret=%d\n", ret); + } + + return; +} + +static void *sxe_dfwd_add(struct net_device *pdev, struct net_device *vdev) +{ + s32 ret; + u32 pool; + + struct sxe_macvlan *accel; + struct sxe_adapter *adapter = netdev_priv(pdev); + + if (adapter->xdp_prog) { + LOG_MSG_WARN(probe, "l2fw offload is not supported with xdp\n"); + ret = -EINVAL; + goto l_err; + } + + LOG_DEBUG_BDF("macvlan offload start\n"); + + if (!macvlan_supports_dest_filter(vdev)) { + LOG_ERROR_BDF("macvlan mode err\n"); + ret = -EMEDIUMTYPE; + goto l_err; + } + +#ifndef HAVE_NO_SB_BIND_CHANNEL + if (netif_is_multiqueue(vdev)) { + LOG_ERROR_BDF("macvlan is multiqueue\n"); + ret = -ERANGE; + goto l_err; + } +#endif + + ret = sxe_macvlan_pools_assign(adapter); + if (ret < 0) { + goto l_err; + } + + accel = kzalloc(sizeof(*accel), GFP_KERNEL); + if (!accel) { + LOG_ERROR_BDF("kzalloc failed\n"); + ret = -ENOMEM; + goto l_err; + } + + pool = find_first_zero_bit(adapter->vt_ctxt.pf_pool_bitmap, + adapter->pool_f.pf_num_used); + set_bit(pool, adapter->vt_ctxt.pf_pool_bitmap); +#ifndef HAVE_NO_SB_BIND_CHANNEL + netdev_set_sb_channel(vdev, pool); +#endif + accel->pool = pool; + accel->netdev = vdev; + + if (!netif_running(pdev)) { + goto l_end; + } + + ret = sxe_macvlan_ring_configure(adapter, accel); + if (ret < 0) { + goto 
l_err; + } + + LOG_INFO_BDF("macvlan offload success, pool=%d, ring_idx=%u \n", + pool, accel->tx_ring_offset); + +l_end: + return accel; + +l_err: + return ERR_PTR(ret); +} + +static void sxe_dfwd_del(struct net_device *pdev, void *priv) +{ + u32 i; + struct sxe_ring *ring; + struct sxe_irq_data *irq_priv; + struct sxe_macvlan *accel = priv; + u32 rxbase = accel->rx_ring_offset; + struct sxe_adapter *adapter = netdev_priv(pdev); + + sxe_uc_addr_del(&adapter->hw, adapter->mac_filter_ctxt.uc_addr_table, + accel->netdev->dev_addr, PF_POOL_INDEX(accel->pool)); + + usleep_range(SXE_UC_ADDR_DEL_WAIT_MIN, SXE_UC_ADDR_DEL_WAIT_MAX); + + for (i = 0; i < adapter->ring_f.ring_per_pool; i++) { + ring = adapter->rx_ring_ctxt.ring[rxbase + i]; + irq_priv = ring->irq_data; + + if (netif_running(adapter->netdev)) { + napi_synchronize(&irq_priv->napi); + } + ring->netdev = NULL; + } + +#ifndef HAVE_NO_SB_BIND_CHANNEL + netdev_unbind_sb_channel(pdev, accel->netdev); + netdev_set_sb_channel(accel->netdev, 0); +#endif + + LOG_INFO_BDF("macvlan del success, pool=%d, ring_idx=%u \n", + accel->pool, accel->tx_ring_offset); + + clear_bit(accel->pool, adapter->vt_ctxt.pf_pool_bitmap); + kfree(accel); + + return; +} + +#ifndef HAVE_NO_WALK_UPPER_DEV +#ifdef HAVE_NETDEV_NESTED_PRIV +STATIC int sxe_macvlan_up(struct net_device *vdev, + struct netdev_nested_priv *priv) +{ + struct sxe_adapter *adapter = (struct sxe_adapter *)priv->data; +#else +STATIC int sxe_macvlan_up(struct net_device *vdev, void *data) +{ + struct sxe_adapter *adapter = data; +#endif + struct sxe_macvlan *accel; + + if (!netif_is_macvlan(vdev)) + { + goto l_end; + } + + accel = macvlan_accel_priv(vdev); + if (!accel){ + goto l_end; + } + + sxe_macvlan_ring_configure(adapter, accel); + +l_end: + return 0; +} +#endif + +void sxe_macvlan_configure(struct sxe_adapter *adapter) +{ +#ifdef HAVE_NO_WALK_UPPER_DEV + struct net_device *upper; + struct list_head *iter; + int err; + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *macvlan = netdev_priv(upper); + struct sxe_macvlan *accel = macvlan->fwd_priv; + + if (macvlan->fwd_priv) { + err = sxe_macvlan_ring_configure(adapter, accel); + if (err) + continue; + } + } + } +#else +#ifdef HAVE_NETDEV_NESTED_PRIV + struct netdev_nested_priv priv = { + .data = (void *)adapter, + }; + + netdev_walk_all_upper_dev_rcu(adapter->netdev, + sxe_macvlan_up, &priv); +#else + netdev_walk_all_upper_dev_rcu(adapter->netdev, + sxe_macvlan_up, adapter); +#endif +#endif + + return; +} +#endif + +s32 sxe_ring_reassign(struct sxe_adapter *adapter, u8 tc) +{ + s32 ret; + struct net_device *dev = adapter->netdev; + +#ifdef SXE_DCB_CONFIGURE + ret = sxe_dcb_tc_validate(adapter, tc); + if (ret) { + goto l_end; + } +#endif + + if (netif_running(dev)) { + sxe_close(dev); + } else { + sxe_reset(adapter); + } + + set_bit(SXE_DOWN, &adapter->state); + + sxe_ring_irq_exit(adapter); + +#ifdef SXE_DCB_CONFIGURE + ret = sxe_dcb_tc_setup(adapter, tc); + if (ret) { + LOG_ERROR_BDF("dcb tc setup failed, tc=%u\n", tc); + goto l_end; + } +#endif + + ret = sxe_ring_irq_init(adapter); + if (ret) { + LOG_ERROR_BDF("interrupt ring assign scheme init failed, err=%d\n", ret); + goto l_end; + } + +#ifdef HAVE_MACVLAN_OFFLOAD_SUPPORT +#ifndef NO_NEED_POOL_DEFRAG + sxe_macvlan_pools_defrag(dev); +#endif +#endif + + if (netif_running(dev)) { + ret = sxe_open(dev); + LOG_INFO_BDF("open done, err=%d\n", ret); + } + +l_end: + return ret; +} + +static int sxe_set_mac_address(struct 
net_device *netdev, void *p) +{ + s32 ret = 0; + struct sockaddr *sock_addr = p; + struct sxe_adapter *adapter = netdev_priv(netdev); + + if (!is_valid_ether_addr(sock_addr->sa_data)) { + ret = -SXE_ERR_INVALID_PARAM; + LOG_ERROR_BDF("invalid mac addr:%pM.(err:%d)\n", + sock_addr->sa_data, ret); + goto l_end; + } + +#ifndef HAVE_ETH_HW_ADDR_SET_API + memcpy(netdev->dev_addr, sock_addr->sa_data, netdev->addr_len); +#else + eth_hw_addr_set(netdev, sock_addr->sa_data); +#endif + memcpy(adapter->mac_filter_ctxt.cur_mac_addr, + sock_addr->sa_data, netdev->addr_len); + + sxe_mac_addr_set(adapter); + +l_end: + return ret; +} + +STATIC u16 sxe_available_uc_num_get(struct sxe_adapter *adapter, u16 pool) +{ + struct sxe_uc_addr_table *uc_table = + &adapter->mac_filter_ctxt.uc_addr_table[0]; + u16 i; + u16 count = 0; + + for (i = 1; i < SXE_UC_ENTRY_NUM_MAX; i++, uc_table++) { + if (test_bit(SXE_UC_ADDR_ENTRY_USED, &uc_table->state)) { + if (uc_table->pool != pool) { + continue; + } + } + + count++; + } + + LOG_DEBUG_BDF("get uc num = %u\n", count); + return count; +} + + +static int sxe_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, +#ifdef HAVE_NDO_FDB_ADD_EXTACK + u16 flags, + struct netlink_ext_ack *extack) +#else + u16 flags) +#endif +{ + int ret; + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + struct sxe_adapter *adapter = netdev_priv(dev); + u16 pool = PF_POOL_INDEX(0); + u16 available_num = sxe_available_uc_num_get(adapter, pool); + + if (netdev_uc_count(dev) >= available_num) { + LOG_ERROR_BDF("netdev_uc_count=%u >= available_num=%u\n", + netdev_uc_count(dev), available_num); + ret = -ENOMEM; + goto l_ret; + } + } + + ret = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); + +l_ret: + return ret; +} + +static s32 sxe_bridge_mode_configure(struct sxe_adapter *adapter, + __u16 mode) +{ + struct sxe_hw *hw = &adapter->hw; + s32 ret = 0; + + switch (mode) { + case BRIDGE_MODE_VEPA: + hw->dma.ops->vt_pool_loopback_switch(hw, false); + + break; + case BRIDGE_MODE_VEB: + hw->dma.ops->vt_pool_loopback_switch(hw, true); + + + break; + default: + ret = -EINVAL; + LOG_ERROR_BDF("config hw[%p] bridge mode[%u], num_vfs[%u] failed\n", + hw, mode, adapter->vt_ctxt.num_vfs); + goto l_ret; + } + + adapter->bridge_mode = mode; + + LOG_MSG_INFO(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + +l_ret: + return ret; +} + +static int sxe_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh, +#ifdef HAVE_NDO_BRIDGE_SETLINK_EXTACK + u16 flags, + struct netlink_ext_ack *extack) +#else + u16 flags) +#endif +{ + struct sxe_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + s32 ret = 0; + + if (!(adapter->cap & SXE_SRIOV_ENABLE)) { + LOG_ERROR_BDF("not in sriov mode,exit\n"); + ret = -EOPNOTSUPP; + goto l_ret; + } + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) { + LOG_ERROR_BDF("can not find proper attr\n"); + ret = -EINVAL; + goto l_ret; + } + + nla_for_each_nested(attr, br_spec, rem) { + int status; + u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) { + continue; + } + + if (nla_len(attr) < sizeof(mode)) { + LOG_ERROR_BDF("attr size[%u] < sizeof(mode)=%zu\n", + nla_len(attr), sizeof(mode)); + ret = -EINVAL; + goto l_ret; + } + + mode = nla_get_u16(attr); + status = sxe_bridge_mode_configure(adapter, mode); + if (status) { + LOG_ERROR_BDF("mode[0x%x] config failed:status=%d\n", + mode, status); + ret = status; + goto l_ret; + } + + break; + } + +l_ret: + return ret; +} + +static int sxe_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 filter_mask, int nlflags) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + s32 ret = 0; + + if (!(adapter->cap & SXE_SRIOV_ENABLE)) { + LOG_ERROR_BDF("not in sriov mode,exit\n"); + goto l_ret; + } + + LOG_DEBUG_BDF("get link:pid[%u], seq[%u], bridge_mode[0x%x], dev[%p], " + "filter_mask[0x%x], nlflags[0x%x]\n", pid, seq, + adapter->bridge_mode, dev, filter_mask, nlflags); + + ret = ndo_dflt_bridge_getlink(skb, pid, seq, dev, + adapter->bridge_mode, 0, 0, nlflags, + filter_mask, NULL); + +l_ret: + return ret; +} + +u32 sxe_mbps_link_speed_get(u32 speed) +{ + u32 mbps; + + switch(speed) { + case SXE_LINK_SPEED_10GB_FULL: + mbps = SXE_LINK_SPEED_MBPS_10G; + break; + case SXE_LINK_SPEED_1GB_FULL: + mbps = SXE_LINK_SPEED_MBPS_1G; + break; + case SXE_LINK_SPEED_100_FULL: + mbps = SXE_LINK_SPEED_MBPS_100; + break; + case SXE_LINK_SPEED_10_FULL: + mbps = SXE_LINK_SPEED_MBPS_10; + break; + default: + mbps = 0; + break; + } + + LOG_INFO("link speed:0x%x mbps speed:0x%x.\n", speed, mbps); + + return mbps; +} + +static int sxe_tx_maxrate_set(struct net_device *netdev, + int queue_index, u32 maxrate) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + u32 bcnrc_val = sxe_mbps_link_speed_get(adapter->link.speed); + + if (!maxrate) { + goto l_end; + } + + bcnrc_val <<= SXE_RTTBCNRC_RF_INT_SHIFT; + bcnrc_val /= maxrate; + + bcnrc_val &= SXE_RTTBCNRC_RF_INT_MASK | SXE_RTTBCNRC_RF_DEC_MASK; + + bcnrc_val |= SXE_RTTBCNRC_RS_ENA; + + hw->dma.ops->dcb_tx_ring_rate_factor_set(hw, queue_index, bcnrc_val); + +l_end: + return 0; +} + +static const struct net_device_ops sxe_netdev_ops = { + .ndo_open = sxe_open, + .ndo_stop = sxe_close, + .ndo_start_xmit = sxe_xmit, + .ndo_set_rx_mode = sxe_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = sxe_set_mac_address, +#ifdef HAVE_NET_DEVICE_EXTENDED + .ndo_size = sizeof(struct net_device_ops), + .extended.ndo_change_mtu = sxe_change_mtu, +#else + .ndo_change_mtu = sxe_change_mtu, +#endif + .ndo_tx_timeout = sxe_tx_timeout, + +#ifdef HAVE_NET_DEVICE_EXTENDED + .extended.ndo_set_tx_maxrate = sxe_tx_maxrate_set, +#else + .ndo_set_tx_maxrate = sxe_tx_maxrate_set, +#endif + .ndo_vlan_rx_add_vid = sxe_vlan_rx_add_vid, + 
.ndo_vlan_rx_kill_vid = sxe_vlan_rx_kill_vid, + .ndo_set_vf_rate = sxe_set_vf_rate, +#ifdef HAVE_NET_DEVICE_EXTENDED + .extended.ndo_set_vf_vlan = sxe_set_vf_vlan, +#else + .ndo_set_vf_vlan = sxe_set_vf_vlan, +#endif + .ndo_set_vf_mac = sxe_set_vf_mac, + .ndo_set_vf_spoofchk = sxe_set_vf_spoofchk, + .ndo_set_vf_rss_query_en = sxe_set_vf_rss_query_en, +#ifdef HAVE_NET_DEVICE_EXTENDED + .extended.ndo_set_vf_trust = sxe_set_vf_trust, +#else + .ndo_set_vf_trust = sxe_set_vf_trust, +#endif + + .ndo_get_vf_config = sxe_get_vf_config, + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + .ndo_set_vf_link_state = sxe_set_vf_link_state, +#endif + + .ndo_set_features = sxe_set_features, + .ndo_fix_features = sxe_fix_features, + .ndo_features_check = sxe_features_check, + + .ndo_get_stats64 = sxe_get_stats64, +#ifdef HAVE_NDO_ETH_IOCTL + .ndo_eth_ioctl = sxe_ioctl, +#else + .ndo_do_ioctl = sxe_ioctl, +#endif + +#ifdef HAVE_MACVLAN_OFFLOAD_SUPPORT + .ndo_dfwd_add_station = sxe_dfwd_add, + .ndo_dfwd_del_station = sxe_dfwd_del, +#endif + .ndo_fdb_add = sxe_fdb_add, + .ndo_bridge_setlink = sxe_bridge_setlink, + .ndo_bridge_getlink = sxe_bridge_getlink, + +#ifdef HAVE_XDP_SUPPORT + .ndo_bpf = sxe_xdp, + .ndo_xdp_xmit = sxe_xdp_xmit, +#ifdef HAVE_AF_XDP_ZERO_COPY +#ifdef HAVE_NDO_XSK_WAKEUP + .ndo_xsk_wakeup = sxe_xsk_wakeup, +#else + .ndo_xsk_async_xmit = sxe_xsk_async_xmit, +#endif +#endif +#endif +}; + +static void sxe_netdev_ops_init(struct net_device *netdev) +{ + netdev->netdev_ops = &sxe_netdev_ops; + return; +} + +bool netif_is_sxe(struct net_device *dev) +{ + return dev && (dev->netdev_ops == &sxe_netdev_ops); +} + +STATIC void sxe_netdev_feature_init(struct net_device *netdev) +{ + struct sxe_adapter *adapter; + + netdev->features = NETIF_F_SG | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_RXHASH; + + netdev->gso_partial_features = SXE_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_GSO_PARTIAL| + SXE_GSO_PARTIAL_FEATURES; + +#ifdef SXE_IPSEC_CONFIGURE + netdev->features |= NETIF_F_HW_ESP | + NETIF_F_HW_ESP_TX_CSUM | + NETIF_F_GSO_ESP; +#endif + + netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | +#ifdef HAVE_MACVLAN_OFFLOAD_SUPPORT + NETIF_F_HW_L2FW_DOFFLOAD | +#endif + NETIF_F_NTUPLE | + NETIF_F_LRO | + NETIF_F_RXALL; + + if (dma_get_mask(netdev->dev.parent) == DMA_BIT_MASK(SXE_DMA_BIT_WIDTH_64)) { + netdev->features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + + netdev->mpls_features |= NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_HW_CSUM | + SXE_GSO_PARTIAL_FEATURES; + + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + adapter = netdev_priv(netdev); + adapter->cap |= SXE_LRO_CAPABLE; + + return; +} + +static void sxe_netdev_name_init(struct net_device *netdev, + struct pci_dev *pdev) +{ + strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + + return; +} + +#ifndef NO_NETDEVICE_MIN_MAX_MTU +static void sxe_netdev_mtu_init(struct net_device *netdev) +{ +#ifdef HAVE_NET_DEVICE_EXTENDED + netdev->extended->min_mtu = ETH_MIN_MTU; + netdev->extended->max_mtu = SXE_MAX_JUMBO_FRAME_SIZE - SXE_ETH_DEAD_LOAD; +#else + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = SXE_MAX_JUMBO_FRAME_SIZE - SXE_ETH_DEAD_LOAD; +#endif + return; +} +#endif + +static void sxe_netdev_priv_flags_init(struct net_device *netdev) +{ + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= 
IFF_SUPP_NOFCS; + + return; +} + +void sxe_netdev_init(struct net_device *netdev, struct pci_dev *pdev) +{ + SET_NETDEV_DEV(netdev, &pdev->dev); + + sxe_netdev_ops_init(netdev); + + sxe_netdev_name_init(netdev, pdev); + + sxe_netdev_feature_init(netdev); + + sxe_netdev_priv_flags_init(netdev); + +#ifndef NO_NETDEVICE_MIN_MAX_MTU + sxe_netdev_mtu_init(netdev); +#endif + + sxe_ethtool_ops_set(netdev); + return; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_netdev.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_netdev.h new file mode 100644 index 000000000000..bfa513f69c94 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_netdev.h @@ -0,0 +1,58 @@ +#ifndef __SXE_NETDEV_H__ +#define __SXE_NETDEV_H__ + +#include +#include + +#include "sxe.h" + +#define SXE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +s32 sxe_link_config(struct sxe_adapter *adapter); + +int sxe_open(struct net_device *netdev); + +int sxe_close(struct net_device *netdev); + +void sxe_set_rx_mode(struct net_device *netdev); + +void __sxe_set_rx_mode(struct net_device *netdev, bool lock); + +bool netif_is_sxe(struct net_device *dev); + +void sxe_netdev_init(struct net_device *netdev, struct pci_dev *pdev); + +void sxe_down(struct sxe_adapter *adapter); + +void sxe_up(struct sxe_adapter *adapter); + +void sxe_terminate(struct sxe_adapter *adapter); + +void sxe_hw_reinit(struct sxe_adapter *adapter); + +void sxe_reset(struct sxe_adapter *adapter); + +void sxe_do_reset(struct net_device *netdev); + +s32 sxe_ring_reassign(struct sxe_adapter *adapter, u8 tc); + +s32 sxe_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); + +#ifndef NO_NEED_POOL_DEFRAG +void sxe_macvlan_pools_defrag(struct net_device *dev); +#endif + +void sxe_macvlan_configure(struct sxe_adapter *adapter); + +u32 sxe_sw_mtu_get(struct sxe_adapter *adapter); + +void sxe_stats_update(struct sxe_adapter *adapter); + +u32 sxe_mbps_link_speed_get(u32 speed); +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_pci.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_pci.c new file mode 100644 index 000000000000..f7ca0770c513 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_pci.c @@ -0,0 +1,124 @@ + +#include "sxe.h" +#include "sxe_pci.h" + +bool sxe_check_cfg_fault(struct sxe_hw *hw, struct pci_dev *dev) +{ + u16 value; + struct sxe_adapter *adapter = hw->adapter; + + pci_read_config_word(dev, PCI_VENDOR_ID, &value); + if (value == SXE_READ_CFG_WORD_FAILED) { + sxe_hw_fault_handle(hw); + LOG_ERROR_BDF("pci vendorId:0x%x read pci config word fail," + "remove adapter.\n", PCI_VENDOR_ID); + return true; + } + + return false; +} + +u16 sxe_read_pci_cfg_word(struct pci_dev *pdev, struct sxe_hw *hw, u32 reg) +{ + u16 value = SXE_READ_CFG_WORD_FAILED; + + if (sxe_is_hw_fault(hw)) { + goto l_end; + } + + pci_read_config_word(pdev, reg, &value); + if (SXE_READ_CFG_WORD_FAILED == value) { + sxe_check_cfg_fault(hw, pdev); + } + +l_end: + return value; +} + + +#ifdef CONFIG_PCI_IOV +u32 sxe_read_pci_cfg_dword(struct sxe_adapter *adapter, u32 reg) +{ + struct sxe_hw *hw = &adapter->hw; + u32 value = SXE_FAILED_READ_CFG_DWORD; + + if (sxe_is_hw_fault(hw)) { + goto l_end; + } + + pci_read_config_dword(adapter->pdev, reg, &value); + if (SXE_FAILED_READ_CFG_DWORD == value) { + sxe_check_cfg_fault(hw, adapter->pdev); + } + +l_end: + return value; +} +#endif + +u32 
sxe_pcie_timeout_poll(struct pci_dev *pdev, struct sxe_hw *hw) +{ + u16 devctl2; + u32 pollcnt; + + devctl2 = sxe_read_pci_cfg_word(pdev, hw, SXE_PCI_DEVICE_CONTROL2); + devctl2 &= SXE_PCIDEVCTRL2_TIMEO_MASK; + + switch (devctl2) { + case SXE_PCIDEVCTRL2_65_130ms: + pollcnt = 1300; + break; + case SXE_PCIDEVCTRL2_260_520ms: + pollcnt = 5200; + break; + case SXE_PCIDEVCTRL2_1_2s: + pollcnt = 20000; + break; + case SXE_PCIDEVCTRL2_4_8s: + pollcnt = 80000; + break; + case SXE_PCIDEVCTRL2_17_34s: + pollcnt = 34000; + break; + case SXE_PCIDEVCTRL2_50_100us: + case SXE_PCIDEVCTRL2_1_2ms: + case SXE_PCIDEVCTRL2_16_32ms: + case SXE_PCIDEVCTRL2_16_32ms_def: + default: + pollcnt = 800; + break; + } + + return (pollcnt * 11) / 10; +} + +unsigned long sxe_get_completion_timeout(struct sxe_adapter *adapter) +{ + u16 devctl2; + + pcie_capability_read_word(adapter->pdev, PCI_EXP_DEVCTL2, &devctl2); + + switch (devctl2 & SXE_PCIDEVCTRL2_TIMEO_MASK) { + case SXE_PCIDEVCTRL2_17_34s: + case SXE_PCIDEVCTRL2_4_8s: + case SXE_PCIDEVCTRL2_1_2s: + return 2000000ul; + case SXE_PCIDEVCTRL2_260_520ms: + return 520000ul; + case SXE_PCIDEVCTRL2_65_130ms: + return 130000ul; + case SXE_PCIDEVCTRL2_16_32ms: + return 32000ul; + case SXE_PCIDEVCTRL2_1_2ms: + return 2000ul; + case SXE_PCIDEVCTRL2_50_100us: + return 100ul; + case SXE_PCIDEVCTRL2_16_32ms_def: + return 32000ul; + default: + break; + } + + return 32000ul; +} + diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_pci.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_pci.h new file mode 100644 index 000000000000..a1644aa9ee41 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_pci.h @@ -0,0 +1,26 @@ +#ifndef _SXE_PCI_H_ +#define _SXE_PCI_H_ + +#include "sxe.h" + +#define PCI_VENDOR_ID_STARS 0x1FF2 +#define SXE_DEV_ID_ASIC 0x10a1 + +#define SXE_DMA_BIT_WIDTH_64 64 +#define SXE_DMA_BIT_WIDTH_32 32 + +#define SXE_READ_CFG_WORD_FAILED 0xFFFFU + +#define SXE_FAILED_READ_CFG_DWORD 0xFFFFFFFFU + +u16 sxe_read_pci_cfg_word(struct pci_dev *pdev, struct sxe_hw *hw, u32 reg); + +bool sxe_check_cfg_fault(struct sxe_hw *hw, struct pci_dev *dev); + +unsigned long sxe_get_completion_timeout(struct sxe_adapter *adapter); + +u32 sxe_pcie_timeout_poll(struct pci_dev *pdev, struct sxe_hw *hw); + +u32 sxe_read_pci_cfg_dword(struct sxe_adapter *adapter, u32 reg); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_phy.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_phy.c new file mode 100644 index 000000000000..8487e110cfa7 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_phy.c @@ -0,0 +1,1167 @@ +#include "sxe.h" +#include "sxe_log.h" +#include "sxe_phy.h" +#include "sxe_msg.h" +#include "sxe_netdev.h" +#include "sxe_filter.h" +#include "sxe_version.h" +#include "sxe_host_hdc.h" +#include "sxe_errno.h" + +#define SXE_COMPAT_SFP_NUM (sizeof(sfp_vendor_pn_list) / \ + sizeof(sfp_vendor_pn_list[0])) + +STATIC u8 sfp_vendor_pn_list[][SXE_SFP_VENDOR_PN_SIZE] = { + {0x58, 0x50, 0x2d, 0x33, 0x47, 0x31, 0x30, 0x2d, \ + 0x31, 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20}, + + {0x58, 0x50, 0x2d, 0x38, 0x47, 0x31, 0x30, 0x2d, \ + 0x30, 0x31, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20}, + + {0x53, 0x32, 0x31, 0x38, 0x35, 0x2d, 0x30, 0x44, \ + 0x33, 0x43, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20}, + + {0x59, 0x56, 0x30, 0x32, 0x2d, 0x43, 0x30, 0x31, \ + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20}, + + {0x59, 0x56, 0x30, 0x32, 0x2d, 0x43, 0x30, 0x31, \ + 0x2d, 0x30, 0x31, 0x20, 0x20, 0x20, 0x20, 0x20}, + + {0x50, 0x39, 0x5a, 0x31, 0x53, 0x4d, 0x41, 0x30, \ + 0x31, 
0x2d, 0x53, 0x44, 0x2d, 0x54, 0x20, 0x20}, + + {0x41, 0x46, 0x43, 0x54, 0x2d, 0x37, 0x33, 0x39, \ + 0x53, 0x4d, 0x5a, 0x20, 0x20, 0x20, 0x20, 0x20}, + + {0x41, 0x46, 0x42, 0x52, 0x2d, 0x37, 0x30, 0x39, \ + 0x44, 0x4d, 0x5a, 0x20, 0x20, 0x20, 0x20, 0x20}, + + {0x41, 0x46, 0x42, 0x52, 0x2d, 0x37, 0x31, 0x30, \ + 0x53, 0x4d, 0x5a, 0x20, 0x20, 0x20, 0x20, 0x20}, + + {0x41, 0x46, 0x43, 0x54, 0x2d, 0x37, 0x33, 0x39, \ + 0x44, 0x4d, 0x5a, 0x20, 0x20, 0x20, 0x20, 0x20}, + + {0x52, 0x54, 0x58, 0x4d, 0x32, 0x32, 0x38, 0x2d, \ + 0x34, 0x30, 0x31, 0x20, 0x20, 0x20, 0x20, 0x20}, + + {0x53, 0x46, 0x50, 0x2d, 0x4d, 0x4d, 0x38, 0x35, \ + 0x54, 0x47, 0x2d, 0x53, 0x33, 0x44, 0x43, 0x20}, + + {0x46, 0x54, 0x4c, 0x58, 0x38, 0x35, 0x37, 0x34, \ + 0x44, 0x33, 0x42, 0x43, 0x4c, 0x20, 0x20, 0x20}, + + {0x46, 0x54, 0x4c, 0x58, 0x38, 0x35, 0x37, 0x34, \ + 0x44, 0x33, 0x42, 0x43, 0x56, 0x20, 0x20, 0x20}, + + {0x46, 0x54, 0x4c, 0x58, 0x31, 0x34, 0x37, 0x35, \ + 0x44, 0x33, 0x42, 0x43, 0x4c, 0x20, 0x20, 0x20}, + + {0x46, 0x43, 0x4c, 0x46, 0x38, 0x35, 0x32, 0x32, \ + 0x50, 0x32, 0x42, 0x54, 0x4c, 0x20, 0x20, 0x20}, + + {0x46, 0x43, 0x42, 0x47, 0x31, 0x31, 0x30, 0x53, \ + 0x44, 0x31, 0x43, 0x30, 0x35, 0x20, 0x20, 0x20}, + + {0x41, 0x46, 0x42, 0x52, 0x2d, 0x37, 0x30, 0x39, \ + 0x44, 0x4d, 0x5a, 0x2d, 0x49, 0x4e, 0x33, 0x20}, + + {0x46, 0x43, 0x42, 0x47, 0x31, 0x31, 0x30, 0x53, \ + 0x44, 0x31, 0x43, 0x30, 0x35, 0x20, 0x20, 0x20}, + + {0x58, 0x50, 0x41, 0x43, 0x2d, 0x38, 0x47, 0x31, \ + 0x30, 0x2d, 0x30, 0x35, 0x20, 0x20, 0x20, 0x20}, + + {0x58, 0x50, 0x44, 0x43, 0x2d, 0x47, 0x31, 0x30, \ + 0x2d, 0x30, 0x32, 0x20, 0x20, 0x20, 0x20, 0x20}, + + {0x52, 0x54, 0x58, 0x4d, 0x32, 0x32, 0x38, 0x2d, \ + 0x35, 0x35, 0x31, 0x20, 0x20, 0x20, 0x20, 0x20}, +}; + +#ifdef SXE_PHY_CONFIGURE +int sxe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) +{ + s32 ret; + u16 value; + s32 regnum = addr; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + + if (adapter->phy_ctxt.phy_info.mii_bus) { + if (devad != MDIO_DEVAD_NONE) { + regnum |= (devad << SXE_DEVAD_SHIFT) | MII_ADDR_C45; + } + + ret = mdiobus_read(adapter->phy_ctxt.phy_info.mii_bus, prtad, regnum); + goto l_end; + } + + if (prtad != adapter->phy_ctxt.phy_info.mdio.prtad) { + LOG_ERROR_BDF("not the current phy, prted=%d, current prted=%d\n", + prtad, adapter->phy_ctxt.phy_info.mdio.prtad); + ret = -EINVAL; + goto l_end; + } + + ret = hw->phy.ops->reg_read(hw, prtad, addr, devad, &value); + ret = value; + +l_end: + return ret; +} + +int sxe_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + s32 ret; + u32 regnum = addr; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_hw *hw = &adapter->hw; + + if (adapter->phy_ctxt.phy_info.mii_bus) { + if (devad != MDIO_DEVAD_NONE) { + regnum |= (devad << SXE_DEVAD_SHIFT) | MII_ADDR_C45; + } + + ret = mdiobus_write(adapter->phy_ctxt.phy_info.mii_bus, + prtad, regnum, value); + if (ret) { + LOG_ERROR_BDF("mdio write failed, prtad=%d, ret=%d\n", + prtad, ret); + goto l_end; + } + } + + if (prtad != adapter->phy_ctxt.phy_info.mdio.prtad) { + LOG_ERROR_BDF("not the current phy, prted=%d, current prted=%d\n", + prtad, adapter->phy_ctxt.phy_info.mdio.prtad); + ret = -EINVAL; + goto l_end; + } + + ret = hw->phy.ops->reg_write(hw, prtad, addr, devad, value); + +l_end: + return ret; +} + +static s32 sxe_mii_bus_read(struct mii_bus *bus, int addr, int regnum) +{ + s32 ret; + u16 value; + u32 device_type, reg_addr; + struct sxe_adapter *adapter = 
bus->priv; + struct sxe_hw *hw = &adapter->hw; + + reg_addr = regnum & GENMASK(15, 0); + device_type = (regnum & GENMASK(20, 16)) >> SXE_MII_DEV_TYPE_SHIFT; + + ret = hw->phy.ops->reg_read(hw, addr, + reg_addr, device_type, &value); + if (ret) { + LOG_ERROR_BDF("mii read failed, reg_addr=%d, device_type=%d, prtad=%d\n", + reg_addr, device_type, addr); + ret = -EBUSY; + goto l_end; + } + + ret = value; + +l_end: + return ret; +} + +static s32 sxe_mii_bus_write(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + s32 ret; + u32 reg_addr; + u32 device_type; + struct sxe_adapter *adapter = bus->priv; + struct sxe_hw *hw = &adapter->hw; + + reg_addr = regnum & GENMASK(15, 0); + device_type = (regnum & GENMASK(20, 16)) >> SXE_MII_DEV_TYPE_SHIFT; + + ret = hw->phy.ops->reg_write(hw, addr, + reg_addr, device_type, val); + if (ret) { + LOG_ERROR_BDF("mii write failed, reg_addr=%d, " + "device_type=%d, prtad=%d, val=%u\n", + reg_addr, device_type, addr, val); + ret = -EBUSY; + } + + return ret; +} + +static u32 sxe_get_phy_type_from_id(u32 phy_id) +{ + u32 phy_type; + + switch (phy_id) { + case SXE_MARVELL_88X3310_PHY_ID: + phy_type = SXE_PHY_MARVELL_88X3310; + break; + default: + phy_type = SXE_PHY_UNKNOWN; + } + + return phy_type; +} + +static s32 sxe_phy_probe(struct sxe_adapter *adapter, u16 phy_addr) +{ + s32 ret; + u16 ext_ability = 0; + struct sxe_phy_info *phy = &adapter->phy_ctxt.phy_info; + struct sxe_hw *hw = &adapter->hw; + + phy->mdio.prtad = phy_addr; + if (mdio45_probe(&phy->mdio, phy_addr) != 0) { + ret = -SXE_ERR_PHY_NOT_PERSENT; + LOG_WARN("mdio probe failed\n"); + goto l_end; + } + + ret = hw->phy.ops->identifier_get(hw, phy_addr, &phy->id); + if (ret) { + LOG_ERROR_BDF("get phy id failed, prtad=%d\n", phy_addr); + goto l_end; + } + + phy->type = sxe_get_phy_type_from_id(phy->id); + if (SXE_PHY_UNKNOWN == phy->type) { + ret = hw->phy.ops->reg_read(hw, phy_addr, MDIO_PMA_EXTABLE, + MDIO_MMD_PMAPMD, &ext_ability); + if (ret) { + LOG_ERROR_BDF("get phy extended ability failed, prtad=%d\n", + phy_addr); + goto l_end; + } + + if (ext_ability & \ + (MDIO_PMA_EXTABLE_10GBT | MDIO_PMA_EXTABLE_1000BT)) { + phy->type = SXE_PHY_CU_UNKNOWN; + } else { + phy->type = SXE_PHY_GENERIC; + } + } + +l_end: + return ret; +} + +s32 sxe_phy_identify(struct sxe_adapter *adapter) +{ + s32 ret; + u32 phy_addr; + struct sxe_phy_info *phy = &adapter->phy_ctxt.phy_info; + + for (phy_addr = 0; phy_addr < SXE_PHY_ADDR_MAX; phy_addr++) { + ret = sxe_phy_probe(adapter, phy_addr); + if (!ret) { + LOG_INFO_BDF("phy probe success, prtad=%d, phy_type=%d\n", + phy->mdio.prtad, phy->type); + goto l_end; + } + } + + phy->mdio.prtad = MDIO_PRTAD_NONE; + ret = -SXE_ERR_PHY_NOT_PERSENT; + +l_end: + return ret; +} + +void sxe_phy_link_capabilities_get(struct sxe_adapter *adapter, u32 *speed, + bool *autoneg) +{ + s32 ret = 0; + struct sxe_hw *hw = &adapter->hw; + + *autoneg = true; + + if (adapter->phy_ctxt.speed != 0) { + *speed = adapter->phy_ctxt.speed; + goto l_end; + } + + ret = adapter->hw.phy.ops->link_cap_get(hw, + adapter->phy_ctxt.phy_info.mdio.prtad, speed); + if (ret) { + LOG_ERROR_BDF("get link speed cap failed, ret=%d\n", ret); + goto l_end; + } + + LOG_INFO_BDF("phy link speed cap=%d\n", *speed); + +l_end: + return; +} + +s32 sxe_phy_reset(struct sxe_adapter *adapter) +{ + s32 ret; + struct sxe_hw *hw = &adapter->hw; + struct sxe_phy_context *phy = &adapter->phy_ctxt; + + if (phy->phy_info.type == SXE_PHY_UNKNOWN) { + ret = phy->ops->identify(adapter); + if (ret) { + LOG_ERROR_BDF("phy identify failed, 
ret=%d\n", ret); + goto l_end; + } + } + + ret = hw->phy.ops->reset(hw, phy->phy_info.mdio.prtad); + if (ret) { + LOG_ERROR_BDF("phy reset failed, ret=%d\n", ret); + } + +l_end: + return ret; +} + +static s32 sxe_phy_link_autoneg_configure(struct sxe_adapter *adapter) +{ + s32 ret; + u32 speed; + u16 autoneg_reg; + bool autoneg = false; + struct sxe_hw *hw = &adapter->hw; + struct sxe_phy_info *phy = &adapter->phy_ctxt.phy_info; + + sxe_phy_link_capabilities_get(adapter, &speed, &autoneg); + + ret = hw->phy.ops->reg_read(hw, phy->mdio.prtad, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, &autoneg_reg); + if (ret) { + LOG_ERROR_BDF("get speed 10gb reg failed, ret=%d\n", ret); + goto l_end; + } + + autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + if ((adapter->phy_ctxt.autoneg_advertised & SXE_LINK_SPEED_10GB_FULL) && + (speed & SXE_LINK_SPEED_10GB_FULL)) { + autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + } + + hw->phy.ops->reg_write(hw, phy->mdio.prtad, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, autoneg_reg); + + ret = hw->phy.ops->reg_read(hw, phy->mdio.prtad, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, &autoneg_reg); + autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF); + if ((adapter->phy_ctxt.autoneg_advertised & SXE_LINK_SPEED_100_FULL) && + (speed & SXE_LINK_SPEED_100_FULL)) { + autoneg_reg |= ADVERTISE_100FULL; + } + + hw->phy.ops->reg_write(hw, phy->mdio.prtad, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); + + ret = hw->phy.ops->reg_read(hw, phy->mdio.prtad, MDIO_CTRL1, + MDIO_MMD_AN, &autoneg_reg); + autoneg_reg |= MDIO_AN_CTRL1_RESTART; + hw->phy.ops->reg_write(hw, phy->mdio.prtad, MDIO_CTRL1, + MDIO_MMD_AN, autoneg_reg); +l_end: + return ret; +} + +s32 sxe_phy_link_speed_configure(struct sxe_adapter *adapter, u32 speed) +{ + s32 ret; + struct sxe_phy_context *phy = &adapter->phy_ctxt; + + phy->autoneg_advertised = 0; + + if (speed & SXE_LINK_SPEED_10GB_FULL) { + phy->autoneg_advertised |= SXE_LINK_SPEED_10GB_FULL; + } + + if (speed & SXE_LINK_SPEED_1GB_FULL) { + phy->autoneg_advertised |= SXE_LINK_SPEED_1GB_FULL; + } + + if (speed & SXE_LINK_SPEED_100_FULL) { + phy->autoneg_advertised |= SXE_LINK_SPEED_100_FULL; + } + + if (speed & SXE_LINK_SPEED_10_FULL) { + phy->autoneg_advertised |= SXE_LINK_SPEED_10_FULL; + } + + ret = sxe_phy_link_autoneg_configure(adapter); + if (ret) { + LOG_ERROR_BDF("phy autoneg config failed, ret=%d\n", ret); + } + + return ret; +} + +s32 sxe_mdiobus_init(struct sxe_adapter *adapter) +{ + s32 ret; + struct mii_bus *bus; + struct mdio_if_info *mdio = &adapter->phy_ctxt.phy_info.mdio; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + struct device *dev = &netdev->dev; + + mdio->prtad = MDIO_PRTAD_NONE; + mdio->mmds = 0; + mdio->mode_support = MDIO_SUPPORTS_C45; + mdio->dev = netdev; + mdio->mdio_read = sxe_mdio_read; + mdio->mdio_write = sxe_mdio_write; + + bus = devm_mdiobus_alloc(dev); + if (!bus) { + LOG_ERROR_BDF("mdio bus alloc failed\n"); + ret = -ENOMEM; + goto l_end; + } + + bus->read = &sxe_mii_bus_read; + bus->write = &sxe_mii_bus_write; + + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", SXE_DRV_NAME, + pci_name(pdev)); + + bus->name = "sxe-mdio"; + bus->priv = adapter; + bus->parent = dev; + bus->phy_mask = GENMASK(31, 0); + + ret = mdiobus_register(bus); + if (ret) { + LOG_ERROR_BDF("mdio bus register failed, ret=%d\n", ret); + goto l_free; + } + + LOG_INFO_BDF("phy init ok\n"); + adapter->phy_ctxt.phy_info.mii_bus = bus; + return 0; + +l_free: + devm_mdiobus_free(dev, bus); +l_end: + return ret; +} + +void sxe_mdiobus_exit(struct sxe_adapter 
*adapter) +{ + if (adapter->phy_ctxt.phy_info.mii_bus) { + mdiobus_unregister(adapter->phy_ctxt.phy_info.mii_bus); + } + + return; +} +#endif + +s32 sxe_sfp_eeprom_read(struct sxe_adapter *adapter, u16 offset, + u16 len, u8 *data) +{ + s32 ret; + struct sxe_driver_cmd cmd; + struct sxe_sfp_rw_req req; + struct sxe_sfp_read_resp *resp; + u16 resp_len = sizeof(struct sxe_sfp_read_resp) + len; + struct sxe_hw *hw = &adapter->hw; + + if (!data) { + ret = -EINVAL; + LOG_ERROR_BDF("sfp read buff == NULL\n"); + goto l_end; + } + + if (len > SXE_SFP_EEPROM_SIZE_MAX) { + ret = -EINVAL; + LOG_ERROR_BDF("sfp read size[%u] > eeprom max size[%d], ret=%d\n", + len, SXE_SFP_EEPROM_SIZE_MAX, ret); + goto l_end; + } + + LOG_INFO_BDF("sfp read, offset=%u, len=%u\n", offset, len); + + req.len = len; + req.offset = offset; + + resp = kmalloc(resp_len, GFP_KERNEL); + if (!resp) { + ret = -ENOMEM; + LOG_ERROR_BDF("sfp read, alloc resp mem failed\n"); + goto l_end; + } + + cmd.req = &req; + cmd.req_len = sizeof(struct sxe_sfp_rw_req); + cmd.resp = resp; + cmd.resp_len = resp_len; + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_SFP_READ; + cmd.is_interruptible = true; + ret = sxe_driver_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("sfp read, hdc failed, offset=%u, len=%u, ret=%d\n", + offset, len, ret); + ret = -EIO; + goto l_free; + } + + if (resp->len != len) { + ret = -EIO; + LOG_ERROR_BDF("sfp read failed, offset=%u, len=%u\n", offset, len); + goto l_free; + } + + memcpy(data, resp->resp, len); + +l_free: + kfree(resp); + +l_end: + return ret; +} + +s32 sxe_sfp_eeprom_write(struct sxe_adapter *adapter, u16 offset, + u32 len, u8 *data) +{ + s32 ret; + struct sxe_driver_cmd cmd; + struct sxe_sfp_rw_req *req; + u16 req_len = sizeof(struct sxe_sfp_rw_req) + len; + struct sxe_hw *hw = &adapter->hw; + + if (!data) { + ret = -EINVAL; + LOG_ERROR_BDF("sfp write data == NULL\n"); + goto l_end; + } + + if (len > SXE_SFP_EEPROM_SIZE_MAX) { + ret = -EINVAL; + LOG_ERROR_BDF("sfp write size[%u] > eeprom max size[%d], ret=%d\n", + len, SXE_SFP_EEPROM_SIZE_MAX, ret); + goto l_end; + } + + LOG_INFO_BDF("sfp write, offset=%u, len=%u\n", offset, len); + + req = kmalloc(req_len, GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + LOG_ERROR_BDF("sfp write, alloc req mem failed\n"); + goto l_end; + } + + req->len = len; + req->offset = offset; + memcpy(req->write_data, data, len); + + cmd.req = req; + cmd.req_len = req_len; + cmd.resp = NULL; + cmd.resp_len = 0; + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_SFP_WRITE; + cmd.is_interruptible = true; + ret = sxe_driver_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("sfp write failed, offset=%u, len=%u, ret=%d\n", + offset, len, ret); + } + + kfree(req); + +l_end: + return ret; +} + +static s32 sxe_sfp_tx_laser_ctrl(struct sxe_adapter *adapter, bool is_disable) +{ + s32 ret; + struct sxe_driver_cmd cmd; + sxe_spp_tx_able_s laser_disable; + struct sxe_hw *hw = &adapter->hw; + + laser_disable.isDisable = is_disable; + LOG_INFO_BDF("sfp tx laser ctrl start, is_disable=%x\n", is_disable); + + cmd.req = &laser_disable; + cmd.req_len = sizeof(laser_disable); + cmd.resp = NULL; + cmd.resp_len = 0; + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_TX_DIS_CTRL; + cmd.is_interruptible = true; + ret = sxe_driver_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("sfp tx laser ctrl failed, ret=%d\n", ret); + goto l_end; + } + + LOG_INFO_BDF("sfp tx laser ctrl success, is_disable=%x\n", is_disable); + +l_end: + return ret; +} + +static void sxe_sfp_tx_laser_enable(struct sxe_adapter *adapter) +{ + 
sxe_sfp_tx_laser_ctrl(adapter, false); + + return; +} + +void sxe_sfp_tx_laser_disable(struct sxe_adapter *adapter) +{ + sxe_sfp_tx_laser_ctrl(adapter, true); + + return; +} + +s32 sxe_sfp_reset(struct sxe_adapter *adapter) +{ + + LOG_INFO_BDF("auto_restart:%u.\n", adapter->hw.mac.auto_restart); + + if (adapter->hw.mac.auto_restart) { + sxe_sfp_tx_laser_disable(adapter); + sxe_sfp_tx_laser_enable(adapter); + adapter->hw.mac.auto_restart = false; + } + + return 0; +} + +static void sxe_sfp_link_capabilities_get(struct sxe_adapter *adapter, u32 *speed, + bool *autoneg) +{ + struct sxe_sfp_info *sfp = &adapter->phy_ctxt.sfp_info; + + *speed = 0; + + if (sfp->type == SXE_SFP_TYPE_1G_CU || + sfp->type == SXE_SFP_TYPE_1G_SXLX ) { + *speed = SXE_LINK_SPEED_1GB_FULL; + *autoneg = true; + goto l_end; + } + + *speed = SXE_LINK_SPEED_10GB_FULL; + *autoneg = false; + + if (sfp->multispeed_fiber) { + *speed |= SXE_LINK_SPEED_10GB_FULL | SXE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } + +l_end: + LOG_INFO_BDF("sfp link speed cap=%d\n", *speed); + return; +} + +static s32 sxe_sfp_rate_select(struct sxe_adapter *adapter, sxe_sfp_rate_e rate) +{ + s32 ret; + struct sxe_driver_cmd cmd; + sxe_sfp_rate_able_s rate_able; + struct sxe_hw *hw = &adapter->hw; + + rate_able.rate = rate; + LOG_INFO_BDF("sfp tx rate select start, rate=%d\n", rate); + + cmd.req = &rate_able; + cmd.req_len = sizeof(rate_able); + cmd.resp = NULL; + cmd.resp_len = 0; + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_RATE_SELECT; + cmd.is_interruptible = true; + ret = sxe_driver_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("sfp rate select failed, ret=%d\n", ret); + } + + LOG_INFO_BDF("sfp tx rate select end, rate=%d\n", rate); + + return ret; +} + +s32 sxe_pcs_sds_init(struct sxe_adapter *adapter, sxe_pcs_mode_e mode, + u32 max_frame) +{ + s32 ret; + sxe_pcs_cfg_s pcs_cfg; + struct sxe_driver_cmd cmd; + struct sxe_hw *hw = &adapter->hw; + + pcs_cfg.mode = mode; + pcs_cfg.mtu = max_frame; + + cmd.req = &pcs_cfg; + cmd.req_len = sizeof(pcs_cfg); + cmd.resp = NULL; + cmd.resp_len = 0; + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_PCS_SDS_INIT; + cmd.is_interruptible = true; + sxe_sfp_tx_laser_disable(adapter); + ret = sxe_driver_cmd_trans(hw, &cmd); + sxe_sfp_tx_laser_enable(adapter); + if (ret) { + LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:pcs init\n", ret); + goto l_end; + } + + sxe_fc_mac_addr_set(adapter); + + LOG_INFO_BDF("mode:%u pcs sds init done.\n", mode); + +l_end: + return ret; +} + +s32 sxe_multispeed_sfp_link_configure(struct sxe_adapter *adapter, u32 speed) +{ + s32 ret = 0; + bool autoneg, link_up; + u32 i, speed_cap, link_speed, speedcnt = 0; + struct sxe_hw *hw = &adapter->hw; + u32 highest_link_speed = SXE_LINK_SPEED_UNKNOWN; + u32 max_frame = sxe_sw_mtu_get(adapter); + + sxe_sfp_link_capabilities_get(adapter, &speed_cap, &autoneg); + + speed &= speed_cap; + + sxe_link_info_get(adapter, &link_speed, &link_up); + if ((link_up == true) && (speed & link_speed)) { + LOG_INFO_BDF("link cfg dont changed , dont need cfp pcs," + "speed=%x, mtu=%u\n", speed, max_frame); + goto l_end; + } + + if (speed & SXE_LINK_SPEED_10GB_FULL) { + LOG_INFO_BDF("10G link cfg start\n"); + + speedcnt++; + highest_link_speed = SXE_LINK_SPEED_10GB_FULL; + + ret = sxe_sfp_rate_select(adapter, SXE_SFP_RATE_10G); + if (ret) { + LOG_ERROR_BDF("set sfp rate failed, ret=%d\n", ret); + goto l_end; + } + + msleep(SXE_RATE_SEL_WAIT); + + ret = sxe_pcs_sds_init(adapter, SXE_PCS_MODE_10GBASE_KR_WO, + max_frame); + if (ret) { + goto l_end; + } + + + for (i = 0; i < 
SXE_LINK_UP_RETRY_CNT; i++) { + msleep(SXE_LINK_UP_RETRY_ITR); + + sxe_link_info_get(adapter, &link_speed, &link_up); + if (link_up) { + LOG_INFO_BDF("link cfg end, link up, speed is 10G\n"); + goto l_out; + } + } + + LOG_WARN_BDF("10G link cfg failed, retry...\n"); + } + + if (speed & SXE_LINK_SPEED_1GB_FULL) { + LOG_INFO_BDF("1G link cfg start\n"); + + speedcnt++; + if (highest_link_speed == SXE_LINK_SPEED_UNKNOWN) { + highest_link_speed = SXE_LINK_SPEED_1GB_FULL; + } + + ret = sxe_sfp_rate_select(adapter, SXE_SFP_RATE_1G); + if (ret) { + LOG_ERROR_BDF("set sfp rate failed, ret=%d\n", ret); + goto l_end; + } + + msleep(SXE_RATE_SEL_WAIT); + + ret = sxe_pcs_sds_init(adapter, SXE_PCS_MODE_1000BASE_KX_W, + max_frame); + if (ret) { + goto l_end; + } + + msleep(SXE_SFP_RESET_WAIT); + + link_up = hw->mac.ops->link_up_1g_check(hw); + if (link_up) { + LOG_INFO_BDF("link cfg end, link up, speed is 1G\n"); + goto l_out; + } + + LOG_WARN_BDF("1G link cfg failed, retry...\n"); + } + + if (speedcnt > 1) { + ret = sxe_multispeed_sfp_link_configure(adapter, highest_link_speed); + } +l_out: + + adapter->phy_ctxt.autoneg_advertised = 0; + + if (speed & SXE_LINK_SPEED_10GB_FULL) { + adapter->phy_ctxt.autoneg_advertised |= SXE_LINK_SPEED_10GB_FULL; + } + + if (speed & SXE_LINK_SPEED_1GB_FULL) { + adapter->phy_ctxt.autoneg_advertised |= SXE_LINK_SPEED_1GB_FULL; + } + +l_end: + return ret; +} + +void sxe_link_info_get(struct sxe_adapter *adapter, u32 *link_speed, bool *link_up) +{ + struct sxe_hw *hw = &adapter->hw; + + *link_up = hw->mac.ops->link_state_is_up(hw); + if (false == *link_up) { + LOG_INFO_BDF("link state =%d, (1=link_up, 0=link_down)\n", + *link_up); + *link_speed = SXE_LINK_SPEED_UNKNOWN; + } else { + *link_speed = hw->mac.ops->link_speed_get(hw); + } + + return; +} + +STATIC s32 sxe_an_cap_get(struct sxe_adapter *adapter, sxe_an_cap_s *an_cap) +{ + s32 ret; + struct sxe_driver_cmd cmd; + struct sxe_hw *hw = &adapter->hw; + + cmd.req = NULL; + cmd.req_len = 0; + cmd.resp = an_cap; + cmd.resp_len = sizeof(*an_cap); + cmd.trace_id = 0; + cmd.opcode = SXE_CMD_AN_CAP_GET; + cmd.is_interruptible = true; + ret = sxe_driver_cmd_trans(hw, &cmd); + if (ret) { + LOG_ERROR_BDF("hdc trans failed ret=%d, cmd:negotiation cap get\n", ret); + } + + return ret; +} + +static s32 sxe_sfp_fc_autoneg(struct sxe_adapter *adapter) +{ + s32 ret; + sxe_an_cap_s an_cap; + struct sxe_hw *hw = &adapter->hw; + + ret = sxe_an_cap_get(adapter, &an_cap); + if (ret) { + LOG_ERROR_BDF("get auto negotiate capacity failed, ret=%d\n", ret); + goto l_end; + } + + if ((an_cap.local.pause_cap & SXE_PAUSE_CAP_SYMMETRIC_PAUSE) && + (an_cap.peer.pause_cap & SXE_PAUSE_CAP_SYMMETRIC_PAUSE)) { + if (hw->fc.requested_mode == SXE_FC_FULL) { + hw->fc.current_mode = SXE_FC_FULL; + LOG_DEV_DEBUG("flow control = full.\n"); + } else { + hw->fc.current_mode = SXE_FC_RX_PAUSE; + LOG_DEV_DEBUG("flow control=RX PAUSE frames only\n"); + } + } else if ((an_cap.local.pause_cap == SXE_PAUSE_CAP_ASYMMETRIC_PAUSE) && + (an_cap.peer.pause_cap == SXE_PAUSE_CAP_BOTH_PAUSE)) { + hw->fc.current_mode = SXE_FC_TX_PAUSE; + LOG_DEV_DEBUG("flow control = TX PAUSE frames only.\n"); + } else if ((an_cap.local.pause_cap == SXE_PAUSE_CAP_BOTH_PAUSE) && + (an_cap.peer.pause_cap == SXE_PAUSE_CAP_ASYMMETRIC_PAUSE)) { + hw->fc.current_mode = SXE_FC_RX_PAUSE; + LOG_DEV_DEBUG("flow control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = SXE_FC_NONE; + LOG_DEV_DEBUG("flow control = none.\n"); + } + hw->fc.requested_mode = hw->fc.current_mode; + +l_end: + 
return ret; +} + +static void sxe_fc_autoneg(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + + s32 ret = -SXE_ERR_FC_NOT_NEGOTIATED; + bool link_up; + u32 link_speed; + if (hw->fc.disable_fc_autoneg) { + LOG_INFO_BDF("disable fc autoneg\n"); + goto l_end; + } + + sxe_link_info_get(adapter, &link_speed, &link_up); + if (!link_up) { + LOG_INFO_BDF("link down, dont fc autoneg\n"); + goto l_end; + } + + ret = sxe_sfp_fc_autoneg(adapter); +l_end: + if (ret) { + hw->fc.current_mode = hw->fc.requested_mode; + } + + return; +} + +void sxe_fc_enable(struct sxe_adapter *adapter) +{ + s32 ret; + u32 i; + struct sxe_hw *hw = &adapter->hw; + + if (!hw->fc.pause_time) { + LOG_ERROR_BDF("link fc disabled since pause time is 0\n"); + hw->fc.requested_mode = hw->fc.current_mode; + ret = -SXE_ERR_INVALID_LINK_SETTINGS; + goto l_end; + } + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & SXE_FC_TX_PAUSE) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + LOG_DEV_DEBUG("invalid water mark configuration, " + "tc[%u] low_water=%u, high_water=%u\n", + i, hw->fc.low_water[i], + hw->fc.high_water[i]); + hw->fc.requested_mode = hw->fc.current_mode; + ret = -SXE_ERR_INVALID_LINK_SETTINGS; + goto l_end; + } + } + } + + sxe_fc_autoneg(adapter); + + ret = hw->mac.ops->fc_enable(hw); + if (ret) { + LOG_ERROR_BDF("link fc enable failed, ret=%d\n", ret); + } + +l_end: + return; +} + +static s32 sxe_sfp_link_configure(struct sxe_adapter *adapter, u32 speed) +{ + s32 ret = 0; + bool an; + bool link_up; + u32 link_speed; + u32 pcs_mode = SXE_PCS_MODE_BUTT; + u32 max_frame = sxe_sw_mtu_get(adapter); + + sxe_sfp_link_capabilities_get(adapter, &speed, &an); + + if (SXE_LINK_SPEED_1GB_FULL == speed) { + pcs_mode = SXE_PCS_MODE_1000BASE_KX_W; + adapter->phy_ctxt.autoneg_advertised = SXE_LINK_SPEED_1GB_FULL; + } else if (SXE_LINK_SPEED_10GB_FULL == speed) { + pcs_mode = SXE_PCS_MODE_10GBASE_KR_WO; + adapter->phy_ctxt.autoneg_advertised = SXE_LINK_SPEED_10GB_FULL; + } + + sxe_link_info_get(adapter, &link_speed, &link_up); + if ((link_up == true) && (link_speed == speed)) { + LOG_INFO_BDF("link cfg dont changed , dont need cfp pcs," + "speed=%x, mtu=%u\n", speed, max_frame); + goto l_end; + } + + ret = sxe_pcs_sds_init(adapter, pcs_mode, max_frame); + if (ret) { + LOG_ERROR_BDF("pcs sds init failed, ret=%d\n", ret); + } + + LOG_INFO_BDF("link :cfg speed=%x, pcs_mode=%x, atuoreg=%d, mtu=%u\n", + speed, pcs_mode, an, max_frame); + +l_end: + return ret; +} + +s32 sxe_link_configure(struct sxe_adapter *adapter, u32 speed) +{ + s32 ret; + + if (adapter->phy_ctxt.sfp_info.multispeed_fiber) { + ret = sxe_multispeed_sfp_link_configure(adapter, speed); + } else { + ret = sxe_sfp_link_configure(adapter, speed); + } + + return ret; +} + +s32 sxe_sfp_vendor_pn_cmp(u8 *sfp_vendor_pn) { + s32 ret = -EINVAL; + u32 i; + for (i = 0; i < SXE_COMPAT_SFP_NUM; i++) { + ret = memcmp(sfp_vendor_pn, sfp_vendor_pn_list[i], + SXE_SFP_VENDOR_PN_SIZE); + if (0 == ret) { + goto l_end; + } + } + +l_end: + return ret; +} + +s32 sxe_sfp_identify(struct sxe_adapter *adapter) +{ + s32 ret; + enum sxe_sfp_type sfp_type; + u8 sfp_comp_code[SXE_SFP_COMP_CODE_SIZE]; + struct sxe_sfp_info *sfp = &adapter->phy_ctxt.sfp_info; + + LOG_INFO_BDF("sfp identify start\n"); + + ret = sxe_sfp_eeprom_read(adapter, SXE_SFF_BASE_ADDR, + SXE_SFP_COMP_CODE_SIZE, sfp_comp_code); + if (ret) { + sfp_type = SXE_SFP_TYPE_NOT_PRESENT; + LOG_ERROR_BDF("get sfp identifier failed, ret=%d\n", 
ret); + goto l_end; + } + + LOG_INFO_BDF("sfp identifier=%x, cable_technology=%x, " + "10GB_code=%x, 1GB_code=%x\n", + sfp_comp_code[SXE_SFF_IDENTIFIER], + sfp_comp_code[SXE_SFF_CABLE_TECHNOLOGY], + sfp_comp_code[SXE_SFF_10GBE_COMP_CODES], + sfp_comp_code[SXE_SFF_1GBE_COMP_CODES]); + + if (sfp_comp_code[SXE_SFF_IDENTIFIER] != SXE_SFF_IDENTIFIER_SFP) { + LOG_WARN("sfp type get failed, offset=%d, type=%x\n", + SXE_SFF_IDENTIFIER, sfp_comp_code[SXE_SFF_IDENTIFIER]); + sfp_type = SXE_SFP_TYPE_UNKNOWN; + ret = -SXE_ERR_SFF_NOT_SUPPORTED; + goto l_end; + } + + if (sfp_comp_code[SXE_SFF_CABLE_TECHNOLOGY] & SXE_SFF_DA_PASSIVE_CABLE) { + sfp_type = SXE_SFP_TYPE_DA_CU; + } else if (sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \ + (SXE_SFF_10GBASESR_CAPABLE | SXE_SFF_10GBASELR_CAPABLE)) { + sfp_type = SXE_SFP_TYPE_SRLR; + } else if (sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \ + SXE_SFF_1GBASET_CAPABLE) { + sfp_type = SXE_SFP_TYPE_1G_CU; + } else if ((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \ + SXE_SFF_1GBASESX_CAPABLE) || \ + (sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \ + SXE_SFF_1GBASELX_CAPABLE)) { + sfp_type = SXE_SFP_TYPE_1G_SXLX; + } else { + sfp_type = SXE_SFP_TYPE_UNKNOWN; + } + + sfp->multispeed_fiber = false; + + if (((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \ + SXE_SFF_1GBASESX_CAPABLE) && + (sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \ + SXE_SFF_10GBASESR_CAPABLE)) || + ((sfp_comp_code[SXE_SFF_1GBE_COMP_CODES] & \ + SXE_SFF_1GBASELX_CAPABLE) && + (sfp_comp_code[SXE_SFF_10GBE_COMP_CODES] & \ + SXE_SFF_10GBASELR_CAPABLE))) { + sfp->multispeed_fiber = true; + LOG_INFO_BDF("identify sfp, sfp is multispeed\n"); + } + + LOG_INFO_BDF("identify sfp, sfp_type=%d, is_multispeed=%x\n", + sfp_type, sfp->multispeed_fiber); + +l_end: + adapter->phy_ctxt.sfp_info.type = sfp_type; + return ret; +} + +struct sxe_phy_ops phy_ops[SXE_PHY_MAX] = +{ + {sxe_sfp_identify, + sxe_sfp_link_configure, + sxe_sfp_link_capabilities_get, + sxe_sfp_reset, + sxe_sfp_tx_laser_disable, + sxe_sfp_tx_laser_enable}, +#ifdef SXE_PHY_CONFIGURE + {sxe_phy_identify, + sxe_phy_link_speed_configure, + sxe_phy_link_capabilities_get, + sxe_phy_reset, + NULL, + NULL}, +#endif +}; + +enum sxe_media_type sxe_media_type_get(struct sxe_adapter *adapter) +{ + enum sxe_media_type type = SXE_MEDIA_TYPE_UNKWON; + + type = SXE_MEDIA_TYPE_FIBER; + adapter->phy_ctxt.is_sfp = true; + + return type; +} + +s32 sxe_phy_init(struct sxe_adapter *adapter) +{ + s32 ret; + enum sxe_media_type media_type = sxe_media_type_get(adapter); + + switch (media_type) { + case SXE_MEDIA_TYPE_FIBER: + adapter->phy_ctxt.ops = &phy_ops[SXE_SFP_IDX]; + break; +#ifdef SXE_PHY_CONFIGURE + case SXE_MEDIA_TYPE_COPPER: + adapter->phy_ctxt.ops = &phy_ops[SXE_PHY_MARVELL_88X3310_idx]; + break; +#endif + default: + LOG_DEV_ERR("other media type are not adapted\n, media_type=%d", + media_type); + break; + } + + ret = adapter->phy_ctxt.ops->identify(adapter); + if (ret) { + LOG_ERROR_BDF("phy identify failed, ret=%d\n", ret); + } + + return ret; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_phy.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_phy.h new file mode 100644 index 000000000000..bc646ae1656a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_phy.h @@ -0,0 +1,183 @@ +#ifndef __SXE_PHY_H__ +#define __SXE_PHY_H__ + +#include +#include +#include "sxe_host_cli.h" +#include "sxe_cli.h" +#ifdef SXE_PHY_CONFIGURE +#include +#include +#endif + +#define SXE_DEV_ID_FIBER 0 +#define SXE_DEV_ID_COPPER 1 + +#define SXE_SFF_BASE_ADDR 0x0 +#define 
SXE_SFF_IDENTIFIER 0x0 +#define SXE_SFF_10GBE_COMP_CODES 0x3 +#define SXE_SFF_1GBE_COMP_CODES 0x6 +#define SXE_SFF_CABLE_TECHNOLOGY 0x8 +#define SXE_SFF_VENDOR_PN 0x28 +#define SXE_SFF_8472_DIAG_MONITOR_TYPE 0x5C +#define SXE_SFF_8472_COMPLIANCE 0x5E + +#define SXE_SFF_IDENTIFIER_SFP 0x3 +#define SXE_SFF_ADDRESSING_MODE 0x4 +#define SXE_SFF_8472_UNSUP 0x0 +#define SXE_SFF_DDM_IMPLEMENTED 0x40 +#define SXE_SFF_DA_PASSIVE_CABLE 0x4 +#define SXE_SFF_DA_ACTIVE_CABLE 0x8 +#define SXE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define SXE_SFF_1GBASESX_CAPABLE 0x1 +#define SXE_SFF_1GBASELX_CAPABLE 0x2 +#define SXE_SFF_1GBASET_CAPABLE 0x8 +#define SXE_SFF_10GBASESR_CAPABLE 0x10 +#define SXE_SFF_10GBASELR_CAPABLE 0x20 + +#define SXE_SFP_COMP_CODE_SIZE 10 +#define SXE_SFP_VENDOR_PN_SIZE 16 +#define SXE_SFP_EEPROM_SIZE_MAX 512 + +#define SXE_SW_SFP_LOS_DELAY_MS 200 + +#define SXE_SW_SFP_MULTI_GB_MS 4000 + +#define SXE_PHY_ADDR_MAX 32 +#define SXE_MARVELL_88X3310_PHY_ID 0x2002B + +#define SXE_RATE_SEL_WAIT (40) +#define SXE_LINK_UP_RETRY_CNT (5) +#define SXE_LINK_UP_RETRY_ITR (100) +#define SXE_SFP_RESET_WAIT (100) + +#define SXE_DEVAD_SHIFT (16) +#define SXE_MII_DEV_TYPE_SHIFT (16) + +#define SXE_LINK_SPEED_MBPS_10G 10000 +#define SXE_LINK_SPEED_MBPS_1G 1000 +#define SXE_LINK_SPEED_MBPS_100 100 +#define SXE_LINK_SPEED_MBPS_10 10 + +struct sxe_adapter; + +enum sxe_media_type { + SXE_MEDIA_TYPE_UNKWON = 0, + SXE_MEDIA_TYPE_FIBER = 1, + SXE_MEDIA_TYPE_COPPER = 2, +}; + +enum sxe_phy_idx { + SXE_SFP_IDX = 0, + SXE_PHY_MARVELL_88X3310_idx, + SXE_PHY_MAX, +}; + +enum sxe_phy_type { + SXE_PHY_MARVELL_88X3310, + SXE_PHY_GENERIC, + SXE_PHY_CU_UNKNOWN, + SXE_PHY_UNKNOWN, +}; + +enum sxe_sfp_type { + SXE_SFP_TYPE_DA_CU = 0, + SXE_SFP_TYPE_SRLR = 1, + SXE_SFP_TYPE_1G_CU = 2, + SXE_SFP_TYPE_1G_SXLX = 4, + SXE_SFP_TYPE_NOT_PRESENT = 5, + SXE_SFP_TYPE_UNKNOWN = 0xFFFF , +}; + +struct sxe_phy_ops { + s32 (*identify)(struct sxe_adapter *adapter); + s32 (*link_configure)(struct sxe_adapter *adapter, u32 speed); + void (*get_link_capabilities)(struct sxe_adapter *adapter, u32 *speed, + bool *autoneg); + s32 (*reset)(struct sxe_adapter *adapter); + void (*sfp_tx_laser_disable)(struct sxe_adapter *adapter); + void (*sfp_tx_laser_enable)(struct sxe_adapter *adapter); +}; + +#ifdef SXE_PHY_CONFIGURE +struct sxe_phy_info { + u32 id; + bool autoneg; + struct mii_bus *mii_bus; + enum sxe_phy_type type; + struct mdio_if_info mdio; +}; +#endif + +struct sxe_sfp_info { + enum sxe_sfp_type type; + bool multispeed_fiber; +}; + +struct sxe_phy_context { + bool is_sfp; + u32 speed; + u32 autoneg_advertised; + struct sxe_phy_ops *ops; +#ifdef SXE_PHY_CONFIGURE + struct sxe_phy_info phy_info; +#endif + struct sxe_sfp_info sfp_info; +}; + +s32 sxe_phy_init(struct sxe_adapter *adapter); + +#ifdef SXE_PHY_CONFIGURE +int sxe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr); + +int sxe_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value); + +s32 sxe_phy_identify(struct sxe_adapter *adapter); + +void sxe_phy_link_capabilities_get(struct sxe_adapter *adapter, u32 *speed, + bool *autoneg); + +s32 sxe_phy_link_speed_configure(struct sxe_adapter *adapter, u32 speed); + +s32 sxe_mdiobus_init(struct sxe_adapter *adapter); + +void sxe_mdiobus_exit(struct sxe_adapter *adapter); + +s32 sxe_phy_reset(struct sxe_adapter *adapter); +#endif + +enum sxe_media_type sxe_media_type_get(struct sxe_adapter *adapter); + +static inline bool sxe_is_sfp(struct sxe_adapter *adapter) +{ + return (sxe_media_type_get(adapter) 
== SXE_MEDIA_TYPE_FIBER) ? \ + true : false; +} + +s32 sxe_sfp_eeprom_read(struct sxe_adapter *adapter, u16 offset, + u16 len, u8 *data); + +s32 sxe_sfp_eeprom_write(struct sxe_adapter *adapter, u16 offset, + u32 len, u8 *data); + +enum sxe_media_type sxe_media_type_get(struct sxe_adapter *adapter); + +void sxe_sfp_tx_laser_disable(struct sxe_adapter *adapter); + +s32 sxe_sfp_vendor_pn_cmp(u8 *sfp_vendor_pn); + +s32 sxe_sfp_identify(struct sxe_adapter *adapter); + +s32 sxe_link_configure(struct sxe_adapter *adapter, u32 speed); + +s32 sxe_sfp_reset(struct sxe_adapter *adapter); + +void sxe_link_info_get(struct sxe_adapter *adapter, u32 *link_speed, bool *link_up); + +s32 sxe_pcs_sds_init(struct sxe_adapter *adapter, sxe_pcs_mode_e mode, + u32 max_frame); + +void sxe_fc_enable(struct sxe_adapter *adapter); + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ptp.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ptp.c new file mode 100644 index 000000000000..c3f321419352 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ptp.c @@ -0,0 +1,709 @@ + +#include "sxe.h" +#include "sxe_ptp.h" +#include "sxe_log.h" +#include "sxe_hw.h" + +static u64 sxe_ptp_read(const struct cyclecounter *cc) +{ + struct sxe_adapter *adapter = + container_of(cc, struct sxe_adapter, ptp_ctxt.hw_cc); + struct sxe_hw *hw = &adapter->hw; + + return hw->dbu.ops->ptp_systime_get(hw); +} + +#ifdef HAVE_PTP_CLOCK_INFO_ADJFINE +static int sxe_ptp_adjfine(struct ptp_clock_info *ptp, long ppm) +{ + struct sxe_adapter *adapter = + container_of(ptp, struct sxe_adapter, ptp_ctxt.ptp_clock_info); + struct sxe_hw *hw = &adapter->hw; + + u32 adj_ns; + u32 neg_adj = 0; + + if (ppm < 0) { + neg_adj = SXE_TIMADJ_SIGN; + adj_ns = (u32)(-((ppm * 125) >> 13)); + } else { + adj_ns = (u32)((ppm * 125) >> 13); + } + + LOG_DEBUG_BDF("ptp adjfreq adj_ns=%u, neg_adj=0x%x\n",adj_ns, neg_adj); + hw->dbu.ops->ptp_freq_adjust(hw, (neg_adj | adj_ns)); + + return 0; +} +#else +static int sxe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct sxe_adapter *adapter = + container_of(ptp, struct sxe_adapter, ptp_ctxt.ptp_clock_info); + struct sxe_hw *hw = &adapter->hw; + + u32 adj_ns; + u32 neg_adj = 0; + + if (ppb < 0) { + neg_adj = SXE_TIMADJ_SIGN; + adj_ns = -ppb; + } else { + adj_ns = ppb; + } + + LOG_DEBUG_BDF("ptp adjfreq adj_ns=%u, neg_adj=0x%x\n",adj_ns, neg_adj); + hw->dbu.ops->ptp_freq_adjust(hw, (neg_adj | adj_ns)); + + return 0; +} +#endif + +static int sxe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct sxe_adapter *adapter = + container_of(ptp, struct sxe_adapter, ptp_ctxt.ptp_clock_info); + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags); + timecounter_adjtime(&adapter->ptp_ctxt.hw_tc, delta); + spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags); + + LOG_INFO_BDF("ptp adjust systim, delta: %lld, after adj: %llu\n", + delta, adapter->ptp_ctxt.hw_tc.nsec);; + + return 0; +} + +static int sxe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + unsigned long flags; + struct sxe_adapter *adapter = + container_of(ptp, struct sxe_adapter, ptp_ctxt.ptp_clock_info); + struct sxe_hw *hw = &adapter->hw; + u64 ns, systim_ns; + + systim_ns = hw->dbu.ops->ptp_systime_get(hw); + LOG_DEBUG_BDF("ptp get time = %llu ns\n", systim_ns); + + spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags); + ns = timecounter_cyc2time(&adapter->ptp_ctxt.hw_tc, systim_ns); + 
spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags); + + LOG_DEBUG_BDF("timecounter_cyc2time = %llu ns\n", ns); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int sxe_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + unsigned long flags; + struct sxe_adapter *adapter = + container_of(ptp, struct sxe_adapter, ptp_ctxt.ptp_clock_info); + u64 ns = timespec64_to_ns(ts); + + LOG_DEBUG_BDF("ptp settime = %llu ns\n", ns); + + spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags); + timecounter_init(&adapter->ptp_ctxt.hw_tc, &adapter->ptp_ctxt.hw_cc, ns); + spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags); + + return 0; +} + +static int sxe_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + s32 ret = 0; + struct sxe_adapter *adapter = + container_of(ptp, struct sxe_adapter, ptp_ctxt.ptp_clock_info); + if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_ctxt.ptp_setup_spp) { + ret = -ENOTSUPP; + goto l_ret; + } + + if (on) { + adapter->cap |= SXE_PTP_PPS_ENABLED; + } else { + adapter->cap &= ~SXE_PTP_PPS_ENABLED; + } + + adapter->ptp_ctxt.ptp_setup_spp(adapter); + +l_ret: + return ret; +} + +static inline void sxe_ptp_clock_info_init( + struct ptp_clock_info *ptp_clock_info, char *name) +{ + snprintf(ptp_clock_info->name, + sizeof(ptp_clock_info->name), "%s", name); + ptp_clock_info->owner = THIS_MODULE; + ptp_clock_info->max_adj = SXE_PTP_MAX_ADJ; + ptp_clock_info->n_alarm = 0; + ptp_clock_info->n_ext_ts = 0; + ptp_clock_info->n_per_out = 0; + ptp_clock_info->pps = 0; +#ifdef HAVE_PTP_CLOCK_INFO_ADJFINE + ptp_clock_info->adjfine = sxe_ptp_adjfine; +#else + ptp_clock_info->adjfreq = sxe_ptp_adjfreq; +#endif + ptp_clock_info->adjtime = sxe_ptp_adjtime; + ptp_clock_info->gettime64 = sxe_ptp_gettime; + ptp_clock_info->settime64 = sxe_ptp_settime; + ptp_clock_info->enable = sxe_ptp_feature_enable; + + return; +} + +static long sxe_ptp_clock_create(struct sxe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + long ret = 0; + + if (!IS_ERR_OR_NULL(adapter->ptp_ctxt.ptp_clock)) { + goto l_ret; + } + + sxe_ptp_clock_info_init(&adapter->ptp_ctxt.ptp_clock_info, netdev->name); + LOG_DEBUG_BDF("init ptp[%s] info finish\n", adapter->ptp_ctxt.ptp_clock_info.name); + + adapter->ptp_ctxt.ptp_clock = + ptp_clock_register(&adapter->ptp_ctxt.ptp_clock_info, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_ctxt.ptp_clock)) { + ret = PTR_ERR(adapter->ptp_ctxt.ptp_clock); + adapter->ptp_ctxt.ptp_clock = NULL; + LOG_DEV_ERR("ptp_clock_register failed\n"); + goto l_ret; + } else if (adapter->ptp_ctxt.ptp_clock) { + LOG_DEV_INFO("registered PHC device on %s\n", netdev->name); + } + + adapter->ptp_ctxt.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->ptp_ctxt.tstamp_config.tx_type = HWTSTAMP_TX_OFF; + +l_ret: + return ret; +} + +static void sxe_ptp_clear_tx_timestamp(struct sxe_adapter *adapter) +{ + if (adapter->ptp_ctxt.ptp_tx_skb) { + dev_kfree_skb_any(adapter->ptp_ctxt.ptp_tx_skb); + adapter->ptp_ctxt.ptp_tx_skb = NULL; + } + + clear_bit_unlock(SXE_PTP_TX_IN_PROGRESS, &adapter->state); + + return; +} + +void sxe_ptp_overflow_check(struct sxe_adapter *adapter) +{ + unsigned long flags; + bool timeout = time_is_before_jiffies( + adapter->ptp_ctxt.last_overflow_check + + SXE_OVERFLOW_PERIOD); + + if (timeout) { + spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags); + timecounter_read(&adapter->ptp_ctxt.hw_tc); + spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, 
flags); + + adapter->ptp_ctxt.last_overflow_check = jiffies; + } + + return ; +} + +void sxe_ptp_rx_hang(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + bool rx_tmstamp_valid; + struct sxe_ring *rx_ring; + unsigned long rx_event; + u16 n; + + rx_tmstamp_valid = hw->dbu.ops->ptp_is_rx_timestamp_valid(hw); + if (!rx_tmstamp_valid) { + adapter->ptp_ctxt.last_rx_ptp_check = jiffies; + goto l_ret; + } + + rx_event = adapter->ptp_ctxt.last_rx_ptp_check; + for (n = 0; n < adapter->rx_ring_ctxt.num ; n++) { + rx_ring = adapter->rx_ring_ctxt.ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) { + rx_event = rx_ring->last_rx_timestamp; + } + } + + if (time_is_before_jiffies(rx_event + SXE_PTP_RX_TIMEOUT)) { + hw->dbu.ops->ptp_rx_timestamp_clear(hw); + adapter->ptp_ctxt.last_rx_ptp_check = jiffies; + + adapter->stats.sw.rx_hwtstamp_cleared++; + + LOG_MSG_DEBUG(drv, "clearing RX Timestamp hang\n"); + } + +l_ret: + return; +} + +void sxe_ptp_tx_hang(struct sxe_adapter *adapter) +{ + bool timeout = time_is_before_jiffies(adapter->ptp_ctxt.ptp_tx_start + + SXE_PTP_TX_TIMEOUT); + + if (!adapter->ptp_ctxt.ptp_tx_skb) { + LOG_INFO_BDF("no ptp skb to progress\n"); + goto l_ret; + } + + if (!test_bit(SXE_PTP_TX_IN_PROGRESS, &adapter->state)) { + LOG_INFO_BDF("tx ptp not in progress\n"); + goto l_ret; + } + + if (timeout) { + cancel_work_sync(&adapter->ptp_ctxt.ptp_tx_work); + sxe_ptp_clear_tx_timestamp(adapter); + adapter->stats.sw.tx_hwtstamp_timeouts++; + LOG_MSG_WARN(drv, "clearing Tx timestamp hang\n"); + } + +l_ret: + return; +} + +static void sxe_ptp_convert_to_hwtstamp(struct sxe_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + unsigned long flags; + u64 ns; + + memset(hwtstamp, 0, sizeof(*hwtstamp)); + + spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags); + ns = timecounter_cyc2time(&adapter->ptp_ctxt.hw_tc, timestamp); + spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags); + + hwtstamp->hwtstamp = ns_to_ktime(ns); + + return ; +} + +static void sxe_ptp_tx_hwtstamp_process(struct sxe_adapter *adapter) +{ + struct sk_buff *skb = adapter->ptp_ctxt.ptp_tx_skb; + struct skb_shared_hwtstamps shhwtstamps; + struct timespec64 ts; + u64 ns; + + ts.tv_nsec = adapter->ptp_ctxt.tx_hwtstamp_nsec; + ts.tv_sec = adapter->ptp_ctxt.tx_hwtstamp_sec; + + ns = (u64)timespec64_to_ns(&ts); + LOG_DEBUG_BDF("get tx timestamp value=%llu\n", ns); + + sxe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, ns); + + adapter->ptp_ctxt.ptp_tx_skb = NULL; + clear_bit_unlock(SXE_PTP_TX_IN_PROGRESS, &adapter->state); + +#if 0 + if (!(adapter->cap & SXE_1588V2_ONE_STEP)) { + skb_tstamp_tx(skb, &shhwtstamps); + } else { + if (adapter->cap & SXE_1588V2_ONE_STEP) { + adapter->cap &= ~SXE_1588V2_ONE_STEP; + } + } +#endif + + skb_tstamp_tx(skb, &shhwtstamps); + + dev_kfree_skb_any(skb); + + return; +} + +void sxe_ptp_get_rx_tstamp_in_pkt(struct sxe_irq_data *irq_data, + struct sk_buff *skb) +{ + __le64 ptp_tm; + struct sxe_adapter *adapter = irq_data->adapter; + + skb_copy_bits(skb, skb->len - SXE_TS_HDR_LEN, &ptp_tm, + SXE_TS_HDR_LEN); + __pskb_trim(skb, skb->len - SXE_TS_HDR_LEN); + + LOG_DEBUG_BDF("ptp get timestamp in pkt end = %llu\n", le64_to_cpu(ptp_tm)); + sxe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), + le64_to_cpu(ptp_tm)); + return; +} + +void sxe_ptp_get_rx_tstamp_in_reg(struct sxe_irq_data *irq_data, + struct sk_buff *skb) +{ + struct sxe_adapter *adapter = irq_data->adapter; + struct sxe_hw *hw = &adapter->hw; + u64 ptp_tm; + bool 
rx_tstamp_valid; + + if (!irq_data || !irq_data->adapter) { + goto l_ret; + } + + rx_tstamp_valid = hw->dbu.ops->ptp_is_rx_timestamp_valid(hw); + if (rx_tstamp_valid) { + ptp_tm = hw->dbu.ops->ptp_rx_timestamp_get(hw); + sxe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), ptp_tm); + } else { + LOG_INFO_BDF("rx timestamp not valid in rx hw rigister\n"); + goto l_ret; + } + +l_ret: + return; +} + +static void sxe_ptp_tx_work_handler(struct work_struct *work) +{ + struct sxe_adapter *adapter = container_of(work, struct sxe_adapter, + ptp_ctxt.ptp_tx_work); + struct sxe_hw *hw = &adapter->hw; + bool timeout = time_is_before_jiffies(adapter->ptp_ctxt.ptp_tx_start + + SXE_PTP_TX_TIMEOUT); + u32 ts_sec; + u32 ts_ns; + u32 last_sec; + u32 last_ns; + bool tx_tstamp_valid = true; + u8 i; + + if (!adapter->ptp_ctxt.ptp_tx_skb) { + sxe_ptp_clear_tx_timestamp(adapter); + goto l_ret; + } + + hw->dbu.ops->ptp_tx_timestamp_get(hw, &ts_sec, &ts_ns); + if (ts_ns != adapter->ptp_ctxt.tx_hwtstamp_nsec || + ts_sec != adapter->ptp_ctxt.tx_hwtstamp_sec) { + + for (i = 0; i < SXE_TXTS_POLL_CHECK; i++) { + hw->dbu.ops->ptp_tx_timestamp_get(hw, &last_sec, &last_ns); + } + + for (; i < SXE_TXTS_POLL; i++) { + hw->dbu.ops->ptp_tx_timestamp_get(hw, &ts_sec, &ts_ns); + if ((last_ns != ts_ns) || (last_sec != ts_sec)) { + tx_tstamp_valid = false; + break; + } + } + + if (tx_tstamp_valid) { + adapter->ptp_ctxt.tx_hwtstamp_nsec = ts_ns; + adapter->ptp_ctxt.tx_hwtstamp_sec = ts_sec; + sxe_ptp_tx_hwtstamp_process(adapter); + return; + } + + LOG_MSG_DEBUG(drv, "Tx timestamp error, " + "ts: %u %u, last ts: %u %u\n", + ts_sec, ts_ns, last_sec, last_ns); + } + + if (timeout) { + sxe_ptp_clear_tx_timestamp(adapter); + adapter->stats.sw.tx_hwtstamp_timeouts++; + LOG_MSG_WARN(drv, "clearing Tx timestamp hang\n"); + } else { + schedule_work(&adapter->ptp_ctxt.ptp_tx_work); + } + +l_ret: + return; +} + +static s32 sxe_ptp_tx_type_get(s32 tx_type, u32 *tsctl) +{ + s32 ret = 0; + + switch (tx_type) { + case HWTSTAMP_TX_OFF: + *tsctl = SXE_TSCTRL_VER_2; + break; + case HWTSTAMP_TX_ON: + *tsctl |= SXE_TSCTRL_TSEN; + break; + default: + ret = -ERANGE; + } + + return ret; +} + +static s32 sxe_ptp_rx_filter_get(s32 *rx_filter, u32 *cap, + bool *is_v1, bool *is_l2, u32 *tses) +{ + s32 ret = 0; + + switch (*rx_filter) { + case HWTSTAMP_FILTER_NONE: + *cap &= ~(SXE_RX_HWTSTAMP_ENABLED | + SXE_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + *is_v1 = true; + *tses |= SXE_TSES_TXES_V1_SYNC | SXE_TSES_RXES_V1_SYNC; + *cap |= (SXE_RX_HWTSTAMP_ENABLED | + SXE_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + *is_v1 = true; + *tses |= SXE_TSES_TXES_V1_DELAY_REQ | SXE_TSES_RXES_V1_DELAY_REQ; + *cap |= (SXE_RX_HWTSTAMP_ENABLED | + SXE_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + *is_l2 = true; + *tses |= SXE_TSES_TXES_V2_ALL | SXE_TSES_RXES_V2_ALL; + *rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + *cap |= (SXE_RX_HWTSTAMP_ENABLED | + SXE_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + *is_v1 = true; + *tses |= SXE_TSES_TXES_V1_ALL | SXE_TSES_RXES_V1_ALL; + *rx_filter = HWTSTAMP_FILTER_ALL; + *cap |= 
SXE_RX_HWTSTAMP_ENABLED; + break; +#ifndef HAVE_NO_HWTSTAMP_FILTER_NTP_ALL + case HWTSTAMP_FILTER_NTP_ALL: +#endif + case HWTSTAMP_FILTER_ALL: + *tses |= SXE_TSES_TXES_V2_ALL | SXE_TSES_RXES_V2_ALL; + *rx_filter = HWTSTAMP_FILTER_ALL; + *cap |= SXE_RX_HWTSTAMP_ENABLED; + break; + default: + *cap &= ~(SXE_RX_HWTSTAMP_ENABLED | + SXE_RX_HWTSTAMP_IN_REGISTER); + *rx_filter = HWTSTAMP_FILTER_NONE; + ret = -ERANGE; + } + + return ret; +} + +static int sxe_ptp_set_timestamp_mode(struct sxe_adapter *adapter, + struct hwtstamp_config *config) +{ + struct sxe_hw *hw = &adapter->hw; + u32 tsctl = 0x0; + u32 tses = 0x0; + bool is_l2 = false; + bool is_v1 = false; + s32 ret; + + if (config->flags) { + ret = -EINVAL; + goto l_ret; + } + + LOG_DEBUG_BDF("ptp set timestamp mode: tx_type[0x%x], rx_filter[0x%x]\n", + config->tx_type, config->rx_filter); + + ret = sxe_ptp_tx_type_get(config->tx_type, &tsctl); + if (ret) { + LOG_ERROR_BDF("ptp get tx type err ret = %d\n", ret); + goto l_ret; + } + + ret = sxe_ptp_rx_filter_get(&config->rx_filter, &adapter->cap, + &is_v1, &is_l2, &tses); + if (ret) { + LOG_ERROR_BDF("ptp get rx filter err ret = %d\n", ret); + goto l_ret; + } + + + LOG_DEBUG_BDF("hw[%p] set hw timestamp: is_l2=%s, tsctl=0x%x, tses=0x%x\n", + hw, is_l2 ? "true" : "false", tsctl, tses); + hw->dbu.ops->ptp_timestamp_mode_set(hw, is_l2, tsctl, tses); + + hw->dbu.ops->ptp_timestamp_enable(hw); + + sxe_ptp_clear_tx_timestamp(adapter); + hw->dbu.ops->ptp_rx_timestamp_clear(hw); + +#if 0 + adapter->cap &= ~SXE_1588V2_ONE_STEP; +#endif + +l_ret: + return ret; +} + +int sxe_ptp_hw_tstamp_config_set(struct sxe_adapter *adapter, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int ret; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) { + ret = -EFAULT; + goto l_ret; + } + + ret = sxe_ptp_set_timestamp_mode(adapter, &config); + if (ret) { + LOG_ERROR_BDF("ptp set timestamp mode failed, err=%d\n",ret); + goto l_ret; + } + + memcpy(&adapter->ptp_ctxt.tstamp_config, &config, + sizeof(adapter->ptp_ctxt.tstamp_config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; + +l_ret: + return ret; +} + +int sxe_ptp_hw_tstamp_config_get(struct sxe_adapter *adapter, + struct ifreq *ifr) +{ + struct hwtstamp_config *config = &adapter->ptp_ctxt.tstamp_config; + + return copy_to_user(ifr->ifr_data, config, + sizeof(*config)) ? 
-EFAULT : 0; +} + +static void sxe_ptp_cyclecounter_start(struct sxe_adapter *adapter) +{ + struct cyclecounter cc; + unsigned long flags; + struct sxe_hw *hw = &adapter->hw; + + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + cc.read = sxe_ptp_read; + + hw->dbu.ops->ptp_systime_init(hw); + + smp_mb(); + + spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags); + memcpy(&adapter->ptp_ctxt.hw_cc, &cc, sizeof(adapter->ptp_ctxt.hw_cc)); + spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags); + + return; +} + +static void sxe_ptp_hw_init(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + hw->dbu.ops->ptp_init(hw); + return; +} + +static inline void sxe_ptp_systime_init(struct sxe_adapter *adapter) +{ + unsigned long flags; + + spin_lock_irqsave(&adapter->ptp_ctxt.ptp_timer_lock, flags); + timecounter_init(&adapter->ptp_ctxt.hw_tc, &adapter->ptp_ctxt.hw_cc, + ktime_get_real_ns()); + spin_unlock_irqrestore(&adapter->ptp_ctxt.ptp_timer_lock, flags); + +} + +void sxe_ptp_reset(struct sxe_adapter *adapter) +{ + sxe_ptp_hw_init(adapter); + + sxe_ptp_set_timestamp_mode(adapter, &adapter->ptp_ctxt.tstamp_config); + + sxe_ptp_cyclecounter_start(adapter); + + sxe_ptp_systime_init(adapter); + + adapter->ptp_ctxt.last_overflow_check = jiffies; + adapter->ptp_ctxt.tx_hwtstamp_nsec = 0; + adapter->ptp_ctxt.tx_hwtstamp_sec = 0; + + return; +} + +void sxe_ptp_configure(struct sxe_adapter *adapter) +{ + spin_lock_init(&adapter->ptp_ctxt.ptp_timer_lock); + + if (sxe_ptp_clock_create(adapter)) { + LOG_DEBUG_BDF("create ptp err in addr:[%p]\n", + adapter->ptp_ctxt.ptp_clock); + goto l_end; + } + + INIT_WORK(&adapter->ptp_ctxt.ptp_tx_work, sxe_ptp_tx_work_handler); + + sxe_ptp_reset(adapter); + + set_bit(SXE_PTP_RUNNING, &adapter->state); + +l_end: + return; +} + +void sxe_ptp_suspend(struct sxe_adapter *adapter) +{ + if (!test_and_clear_bit(SXE_PTP_RUNNING, &adapter->state)) { + goto l_ret; + } + + adapter->cap &= ~SXE_PTP_PPS_ENABLED; + if (adapter->ptp_ctxt.ptp_setup_spp) { + adapter->ptp_ctxt.ptp_setup_spp(adapter); + } + + cancel_work_sync(&adapter->ptp_ctxt.ptp_tx_work); + sxe_ptp_clear_tx_timestamp(adapter); + +l_ret: + return; +} + +void sxe_ptp_stop(struct sxe_adapter *adapter) +{ + sxe_ptp_suspend(adapter); + + if (adapter->ptp_ctxt.ptp_clock) { + ptp_clock_unregister(adapter->ptp_ctxt.ptp_clock); + adapter->ptp_ctxt.ptp_clock = NULL; + LOG_DEV_INFO("removed PHC on %s\n", + adapter->netdev->name); + } + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ptp.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ptp.h new file mode 100644 index 000000000000..4dbee740a73c --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ptp.h @@ -0,0 +1,79 @@ + +#ifndef __SXE_PTP_H__ +#define __SXE_PTP_H__ + +#include +#include +#include "sxe.h" + +#define SXE_OVERFLOW_PERIOD (HZ * 30) +#define SXE_PTP_TX_TIMEOUT (HZ) +#define SXE_TS_HDR_LEN 8 +#define SXE_PTP_RX_TIMEOUT (5 * HZ) + +#define SXE_TIMADJ_SIGN 0x80000000 + +#define SXE_PTP_MSG_TYPE_SYNC 0x0000 +#define SXE_PTP_MSG_TYPE_DELAY_REQ 0x0001 +#define SXE_PTP_MSG_TYPE_MASK 0x000F + +#define SXE_PTP_FLAGFIELD_OFFSET 0x0006 +#define SXE_PTP_FLAGFIELD_TWOSTEP 0x0002 +#define SXE_PTP_FLAGFIELD_UNICAST 0x0004 +#define SXE_PTP_FLAGFIELD_MASK 0xFFFF + +#define SXE_PTP_MAX_ADJ 125000000 + +void sxe_ptp_configure(struct sxe_adapter *adapter); + +void sxe_ptp_suspend(struct sxe_adapter *adapter); + +void sxe_ptp_stop(struct sxe_adapter *adapter); + +void sxe_ptp_overflow_check(struct sxe_adapter 
*adapter); + +void sxe_ptp_rx_hang(struct sxe_adapter *adapter); + +void sxe_ptp_tx_hang(struct sxe_adapter *adapter); + +void sxe_ptp_reset(struct sxe_adapter *adapter); + +int sxe_ptp_hw_tstamp_config_set(struct sxe_adapter *adapter, + struct ifreq *ifr); + +int sxe_ptp_hw_tstamp_config_get(struct sxe_adapter *adapter, + struct ifreq *ifr); + +void sxe_ptp_get_rx_tstamp_in_pkt(struct sxe_irq_data *irq_data, + struct sk_buff *skb); + +void sxe_ptp_get_rx_tstamp_in_reg(struct sxe_irq_data *irq_data, + struct sk_buff *skb); + +static inline void sxe_ptp_rx_hwtstamp_process(struct sxe_ring *rx_ring, + union sxe_rx_data_desc *rx_desc, + struct sk_buff *skb) +{ + LOG_DEBUG("process rx hwtsamp of ring[%u]\n", rx_ring->idx); + + if (unlikely(sxe_status_err_check(rx_desc, SXE_RXD_STAT_TSIP))) { + sxe_ptp_get_rx_tstamp_in_pkt(rx_ring->irq_data, skb); + LOG_DEBUG("we got the time stamp in the end of packet\n"); + goto l_ret; + } + + if (unlikely(!sxe_status_err_check(rx_desc, SXE_RXDADV_STAT_TS))) { + LOG_DEBUG("the ptp time stamp is not ready in register\n"); + goto l_ret; + } + + sxe_ptp_get_rx_tstamp_in_reg(rx_ring->irq_data, skb); + LOG_DEBUG("we got the time stamp in the time register\n"); + + rx_ring->last_rx_timestamp = jiffies; + +l_ret: + return; +} + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ring.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ring.c new file mode 100644 index 000000000000..cb322e9c7931 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ring.c @@ -0,0 +1,578 @@ +#include +#include + +#include "sxe.h" +#include "sxe_log.h" + +u16 sxe_rss_num_get(struct sxe_adapter *adapter) +{ + return (((adapter->cap & SXE_SRIOV_DCB_ENABLE) == \ + SXE_SRIOV_DCB_ENABLE)) ? \ + SXE_NON_RSS_RING_NUM : adapter->ring_f.rss; +} + +u16 sxe_rss_mask_get(struct sxe_adapter *adapter) +{ + u16 mask; + u8 tcs = sxe_dcb_tc_get(adapter); + u16 num = adapter->pool_f.total_num; + + switch (adapter->cap & SXE_SRIOV_DCB_ENABLE) { + case (SXE_SRIOV_DCB_ENABLE): + mask = SXE_RSS_DISABLED_MASK; + break; + case SXE_DCB_ENABLE: + mask = (tcs <= SXE_DCB_4_TC) ? \ + SXE_RSS_16Q_MASK : SXE_RSS_8Q_MASK; + break; + case SXE_SRIOV_ENABLE: + mask = (num <= SXE_32_POOL) ? \ + SXE_RSS_4Q_MASK : SXE_RSS_2Q_MASK; + break; + default: + mask = SXE_RSS_16Q_MASK; + break; + } + + return mask; +} +u16 sxe_pool_mask_get(struct sxe_adapter *adapter) +{ + u16 mask; + u8 tcs = sxe_dcb_tc_get(adapter); + u16 pool_total_num = adapter->pool_f.total_num; + + switch (adapter->cap & SXE_SRIOV_DCB_ENABLE) { + case (SXE_SRIOV_DCB_ENABLE): + mask = (tcs > 4) ? SXE_8Q_PER_POOL_MASK : SXE_4Q_PER_POOL_MASK; + break; + case SXE_SRIOV_ENABLE: + mask = (pool_total_num > SXE_32_POOL) ? 
\ + SXE_2Q_PER_POOL_MASK : SXE_4Q_PER_POOL_MASK; + break; + default: + mask = 0; + break; + } + + return mask; +} + +void sxe_ring_feature_init(struct sxe_adapter *adapter) +{ + u16 rss, fnav; + + rss = min_t(u16, SXE_RSS_RING_NUM_MAX, num_online_cpus()); + adapter->ring_f.rss_limit = rss; + + fnav = min_t(u16, SXE_FNAV_RING_NUM_MAX, num_online_cpus()); + + adapter->ring_f.fnav_limit = fnav; + + adapter->pool_f.pf_num_used = 1; + adapter->pool_f.pf_num_limit = SXE_NUM_PF_POOL_DEFAULT; + + adapter->tx_ring_ctxt.depth = SXE_DEFAULT_DESC_CNT; + adapter->rx_ring_ctxt.depth = SXE_DEFAULT_DESC_CNT; + + adapter->xdp_ring_ctxt.depth = 0; + + return; +} + +static void sxe_dcb_sriov_ring_set(struct sxe_adapter *adapter) +{ + u32 i; + u8 tcs = sxe_dcb_tc_get(adapter); + u16 pf_pool_num_max = SXE_TXRX_RING_NUM_MAX / tcs; + struct sxe_pool_feature *pool_f = &adapter->pool_f; + struct sxe_ring_feature *ring_f = &adapter->ring_f; + + pool_f->pf_num_used = min_t(u16, pool_f->pf_num_limit, pf_pool_num_max); + pool_f->total_num = pool_f->pf_num_used + pool_f->vf_num_used; + + if (tcs > SXE_DCB_4_TC) { + pool_f->total_num = min_t(u16, pool_f->total_num, SXE_16_POOL); + } else { + pool_f->total_num = min_t(u16, pool_f->total_num, SXE_32_POOL); + } + + pool_f->pf_num_used = pool_f->total_num - pool_f->vf_num_used; + ring_f->tc_per_pool = tcs; + + adapter->rx_ring_ctxt.num = pool_f->pf_num_used * ring_f->tc_per_pool; + adapter->tx_ring_ctxt.num = adapter->rx_ring_ctxt.num; + adapter->xdp_ring_ctxt.num = 0; + + for (i = 0; i < tcs; i++) { + netdev_set_tc_queue(adapter->netdev, i, SXE_NON_RSS_RING_NUM, i); + } + + adapter->cap &= ~SXE_FNAV_SAMPLE_ENABLE; + + LOG_INFO_BDF("tcs = %d, pf_num_used = %d, " + "pool_total_num=%d, tc_per_pool=%d, rx_num=%u, " + "tx_num=%u, adapter_cap = 0x%x\n", + tcs, pool_f->pf_num_used, pool_f->total_num, + ring_f->tc_per_pool, adapter->rx_ring_ctxt.num, + adapter->tx_ring_ctxt.num, adapter->cap); + + return; +} + +static void sxe_dcb_ring_set(struct sxe_adapter *adapter) +{ + u32 i; + u16 ring_per_tc; + u8 tcs = sxe_dcb_tc_get(adapter); + struct net_device *dev = adapter->netdev; + struct sxe_ring_feature *ring_f = &adapter->ring_f; + + ring_per_tc = dev->num_tx_queues / tcs; + if (tcs > SXE_DCB_4_TC) { + ring_per_tc = min_t(u16, ring_per_tc, SXE_8_RING_PER_TC); + } else { + ring_per_tc = min_t(u16, ring_per_tc, SXE_16_RING_PER_TC); + } + + ring_per_tc = min_t(s32, ring_per_tc, ring_f->rss_limit); + + ring_f->ring_per_tc = ring_per_tc; + adapter->rx_ring_ctxt.num = ring_per_tc * tcs; + adapter->tx_ring_ctxt.num = adapter->rx_ring_ctxt.num; + adapter->xdp_ring_ctxt.num = 0; + + for (i = 0; i < tcs; i++) { + netdev_set_tc_queue(dev, i, ring_per_tc, ring_per_tc * i); + } + + adapter->cap &= ~SXE_FNAV_SAMPLE_ENABLE; + + LOG_INFO_BDF("tcs = %d, ring_per_tc=%d," + "rx_num=%u, tx_num=%u, adapter_cap = 0x%x\n", + tcs, ring_f->ring_per_tc, adapter->rx_ring_ctxt.num, + adapter->tx_ring_ctxt.num, adapter->cap); + + return; +} + +static void sxe_sriov_ring_set(struct sxe_adapter *adapter) +{ + u16 num_pool, ring_per_pool; + struct sxe_pool_feature *pool_f = &adapter->pool_f; + struct sxe_ring_feature *ring_f = &adapter->ring_f; + u16 pf_num_used = pool_f->pf_num_limit; + u16 max_ring_per_pool = SXE_TXRX_RING_NUM_MAX / pf_num_used; + + ring_per_pool = min_t(u16, ring_f->rss_limit, max_ring_per_pool); + num_pool = pf_num_used + pool_f->vf_num_used; + num_pool = min_t(u16, SXE_POOLS_NUM_MAX, num_pool); + pf_num_used = num_pool - pool_f->vf_num_used; + + if (num_pool > SXE_32_POOL) { + ring_per_pool 
= min_t(u16, ring_per_pool, SXE_2_RING_PER_POOL); + } else { + ring_per_pool = (ring_per_pool > SXE_3_RING_PER_POOL) ? + SXE_4_RING_PER_POOL : + (ring_per_pool > SXE_1_RING_PER_POOL) ? + SXE_2_RING_PER_POOL : SXE_1_RING_PER_POOL; + } + + ring_f->ring_per_pool = ring_per_pool; + pool_f->total_num = num_pool; + pool_f->pf_num_used = pf_num_used; + + adapter->rx_ring_ctxt.num = pf_num_used * ring_per_pool; + adapter->tx_ring_ctxt.num = adapter->rx_ring_ctxt.num; + adapter->xdp_ring_ctxt.num = 0; + +#ifdef HAVE_MACVLAN_OFFLOAD_SUPPORT + if (pf_num_used > SXE_NUM_PF_POOL_DEFAULT) { + netdev_set_num_tc(adapter->netdev, SXE_DCB_1_TC); + } +#endif + netdev_set_tc_queue(adapter->netdev, 0, ring_f->ring_per_pool, 0); + + adapter->cap &= ~SXE_FNAV_SAMPLE_ENABLE; + + LOG_INFO_BDF("pf_num_used = %d, pool_total_num=%d, " + "ring_per_pool=%d, rx_num=%u, tx_num=%u, rss_limit=%u, " + "adapter_cap = 0x%x\n", + pool_f->pf_num_used, pool_f->total_num, + ring_f->ring_per_pool, + adapter->rx_ring_ctxt.num, + adapter->tx_ring_ctxt.num, + ring_f->rss_limit, + adapter->cap); + + return; +} + +static u16 sxe_xdp_queues_num_get(struct sxe_adapter *adapter) +{ + u16 queues = min_t(u16, SXE_XDP_RING_NUM_MAX, nr_cpu_ids); + + return adapter->xdp_prog ? queues : 0; +} + +static void sxe_rss_ring_set(struct sxe_adapter *adapter) +{ + u16 rss; + struct sxe_ring_feature *ring_f = &adapter->ring_f; + + ring_f->rss = ring_f->rss_limit; + rss = ring_f->rss; + + adapter->cap &= ~SXE_FNAV_SAMPLE_ENABLE; + + if (rss > SXE_NON_RSS_RING_NUM) { + adapter->cap |= SXE_RSS_ENABLE; + } + + if ((adapter->cap & SXE_RSS_ENABLE) && + adapter->fnav_ctxt.sample_rate) { + ring_f->fnav_num = ring_f->fnav_limit; + rss = ring_f->fnav_num; + + if (!(adapter->cap & SXE_FNAV_SPECIFIC_ENABLE)) { + adapter->cap |= SXE_FNAV_SAMPLE_ENABLE; + } + } + + adapter->rx_ring_ctxt.num = rss; + adapter->tx_ring_ctxt.num = rss; + + adapter->xdp_ring_ctxt.num = sxe_xdp_queues_num_get(adapter); + + LOG_INFO_BDF("rss=%u, rss_limit=%u, fnav_limit=%u " + "rx_num=%u, tx_num=%u, xdp_num=%u cap=0x%x\n", + ring_f->rss, ring_f->rss_limit, + ring_f->fnav_limit, + adapter->rx_ring_ctxt.num, + adapter->tx_ring_ctxt.num, + adapter->xdp_ring_ctxt.num, + adapter->cap); + + return; +} + +void sxe_ring_num_set(struct sxe_adapter *adapter) +{ + adapter->rx_ring_ctxt.num = SXE_NON_RSS_RING_NUM; + adapter->tx_ring_ctxt.num = SXE_NON_RSS_RING_NUM; + adapter->xdp_ring_ctxt.num = 0; + + adapter->pool_f.pf_num_used = 1; + adapter->ring_f.ring_per_pool = 1; + + switch (adapter->cap & SXE_SRIOV_DCB_ENABLE) { + case (SXE_SRIOV_DCB_ENABLE): + sxe_dcb_sriov_ring_set(adapter); + break; + case SXE_DCB_ENABLE: + sxe_dcb_ring_set(adapter); + break; + case SXE_SRIOV_ENABLE: + sxe_sriov_ring_set(adapter); + break; + default: + sxe_rss_ring_set(adapter); + break; + } + + LOG_INFO_BDF("set ring num, cap = 0x%x\n", adapter->cap); + return; +} + +static void sxe_dcb_sriov_ring_reg_map(struct sxe_adapter *adapter) +{ + u32 i; + u16 reg_idx; + u8 tcs = sxe_dcb_tc_get(adapter); + u16 pool = 0; + u16 pool_mask = sxe_pool_mask_get(adapter); + struct sxe_pool_feature *pool_f = &adapter->pool_f; + + reg_idx = pool_f->vf_num_used * SXE_HW_RING_IN_POOL(pool_mask); + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++, reg_idx++) { + if ((reg_idx & ~pool_mask) >= tcs) { + pool++; + reg_idx = __ALIGN_MASK(reg_idx, ~pool_mask); + } + + adapter->rx_ring_ctxt.ring[i]->reg_idx = reg_idx; + adapter->rx_ring_ctxt.ring[i]->netdev = pool ? 
\ + NULL : adapter->netdev; + } + + reg_idx = pool_f->vf_num_used * SXE_HW_RING_IN_POOL(pool_mask); + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++, reg_idx++) { + if ((reg_idx & ~pool_mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~pool_mask); + adapter->tx_ring_ctxt.ring[i]->reg_idx = reg_idx; + } + + LOG_INFO_BDF("dcb sriov ring to reg mapping\n"); + return; +} + +static void sxe_first_reg_idx_get(u8 tcs, u8 tc_idx, + u32 *tx_idx, u32 *rx_idx) +{ + if (tcs > SXE_DCB_4_TC) { + *rx_idx = tc_idx << SXE_8TC_RX_RING_SHIFT_4; + if (tc_idx < SXE_TC_IDX3) { + *tx_idx = tc_idx << SXE_TC2_TX_RING_SHIFT_5; + } else if (tc_idx < SXE_TC_IDX5) { + *tx_idx = (tc_idx + SXE_TX_RING_OFFSET_2) << \ + SXE_TC4_TX_RING_SHIFT_4; + } else { + *tx_idx = (tc_idx + SXE_TX_RING_OFFSET_8) << \ + SXE_TC5_TX_RING_SHIFT_3; + } + + } else { + *rx_idx = tc_idx << SXE_4TC_RX_RING_SHIFT_5; + if (tc_idx < SXE_TC_IDX2) { + *tx_idx = tc_idx << SXE_TC1_TX_RING_SHIFT_6; + } else { + *tx_idx = (tc_idx + SXE_TX_RING_OFFSET_4) << \ + SXE_TC4_TX_RING_SHIFT_4; + } + } + + return; +} + +static void sxe_dcb_ring_reg_map(struct sxe_adapter *adapter) +{ + u32 i, offset; + u16 ring_per_tc; + u32 tx_idx = 0; + u32 rx_idx = 0; + u32 tc_idx = 0; + u8 tcs = sxe_dcb_tc_get(adapter); + + ring_per_tc = adapter->ring_f.ring_per_tc; + + for (offset = 0; tc_idx < tcs; tc_idx++, offset += ring_per_tc) { + sxe_first_reg_idx_get(tcs, tc_idx, &tx_idx, &rx_idx); + for (i = 0; i < ring_per_tc; i++, tx_idx++, rx_idx++) { + adapter->tx_ring_ctxt.ring[offset + i]->reg_idx = tx_idx; + adapter->rx_ring_ctxt.ring[offset + i]->reg_idx = rx_idx; + adapter->rx_ring_ctxt.ring[offset + i]->netdev = + adapter->netdev; + adapter->tx_ring_ctxt.ring[offset + i]->tc_idx = tc_idx; + adapter->rx_ring_ctxt.ring[offset + i]->tc_idx = tc_idx; + } + } + + LOG_INFO_BDF("dcb ring to reg mapping\n"); + return; +} + +static void sxe_sriov_ring_reg_map(struct sxe_adapter *adapter) +{ + u32 i; + u16 reg_idx; + u16 pool = 0; + u16 pool_mask = sxe_pool_mask_get(adapter); + u16 rss_mask = sxe_rss_mask_get(adapter); + struct sxe_pool_feature *pool_f = &adapter->pool_f; + struct sxe_ring_feature *ring_f = &adapter->ring_f; + + reg_idx = pool_f->vf_num_used * SXE_HW_RING_IN_POOL(pool_mask); + for (i = 0; i < adapter->rx_ring_ctxt.num; i++, reg_idx++) { + if ((reg_idx & ~pool_mask) >= ring_f->ring_per_pool) { + pool++; + reg_idx = __ALIGN_MASK(reg_idx, ~pool_mask); + } + adapter->rx_ring_ctxt.ring[i]->reg_idx = reg_idx; + adapter->rx_ring_ctxt.ring[i]->netdev = pool ? 
\ + NULL : adapter->netdev; + LOG_INFO_BDF("rx ring idx[%u] map to reg idx[%d]\n", i, reg_idx); + } + + reg_idx = pool_f->vf_num_used * SXE_HW_RING_IN_POOL(pool_mask); + for (i = 0; i < adapter->tx_ring_ctxt.num; i++, reg_idx++) { + if ((reg_idx & rss_mask) >= ring_f->ring_per_pool){ + reg_idx = __ALIGN_MASK(reg_idx, ~pool_mask); + } + adapter->tx_ring_ctxt.ring[i]->reg_idx = reg_idx; + LOG_INFO_BDF("tx ring idx[%u] map to reg idx[%d]\n", i, reg_idx); + } + + LOG_INFO_BDF("sriov ring to reg mapping\n"); + return; +} + +static void sxe_rss_ring_reg_map(struct sxe_adapter *adapter) +{ + u32 i, reg_idx; + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + adapter->rx_ring_ctxt.ring[i]->reg_idx = i; + adapter->rx_ring_ctxt.ring[i]->netdev = adapter->netdev; + } + + for (i = 0, reg_idx = 0; i < adapter->tx_ring_ctxt.num; i++, reg_idx++) { + adapter->tx_ring_ctxt.ring[i]->reg_idx = reg_idx; + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++, reg_idx++) { + adapter->xdp_ring_ctxt.ring[i]->reg_idx = reg_idx; + } + + LOG_INFO_BDF("rss ring to reg mapping\n"); + return; +} + +void sxe_ring_reg_map(struct sxe_adapter *adapter) +{ + SXE_BUG_ON(!adapter); + + adapter->rx_ring_ctxt.ring[0]->reg_idx = 0; + adapter->tx_ring_ctxt.ring[0]->reg_idx = 0; + + switch (adapter->cap & SXE_SRIOV_DCB_ENABLE) { + case (SXE_SRIOV_DCB_ENABLE): + sxe_dcb_sriov_ring_reg_map(adapter); + break; + case SXE_DCB_ENABLE: + sxe_dcb_ring_reg_map(adapter); + break; + case SXE_SRIOV_ENABLE: + sxe_sriov_ring_reg_map(adapter); + break; + default: + sxe_rss_ring_reg_map(adapter); + break; + } + + LOG_INFO_BDF("ring to reg mapping, cap = %x\n", adapter->cap); + return; +} + + +static void sxe_add_ring(struct sxe_ring *ring, + struct sxe_list *head) +{ + ring->next = head->next; + head->next = ring; + head->cnt++; + + return; +} + +void sxe_tx_ring_init(struct sxe_adapter *adapter, u16 base, + u16 cnt, u16 ring_idx, u16 irq_idx) +{ + struct sxe_irq_data *irq_data = adapter->irq_ctxt.irq_data[irq_idx]; + struct sxe_ring *ring = &(irq_data->ring[base]); + u16 txr_idx = ring_idx; + + LOG_INFO_BDF("irq_idx:%u tx_ring_cnt:%u base:%u ring_idx:%u.\n", + irq_idx, cnt, base, ring_idx); + + while(cnt) { + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->irq_data = irq_data; + + sxe_add_ring(ring, &irq_data->tx.list); + irq_data->tx.irq_rate.next_update = jiffies + 1; + + ring->depth = adapter->tx_ring_ctxt.depth; + ring->idx = txr_idx; + + WRITE_ONCE(adapter->tx_ring_ctxt.ring[txr_idx], ring); + cnt--; + txr_idx += adapter->irq_ctxt.ring_irq_num; + ring++; + } + + return; +} + +void sxe_xdp_ring_init(struct sxe_adapter *adapter, u16 base, + u16 cnt, u16 ring_idx, u16 irq_idx) +{ + struct sxe_irq_data *irq_data = adapter->irq_ctxt.irq_data[irq_idx]; + struct sxe_ring *ring = &(irq_data->ring[base]); + u16 xdp_idx = ring_idx; + + LOG_INFO_BDF("irq_idx:%u xdp_ring_cnt:%u base:%u ring_idx:%u.\n", + irq_idx, cnt, base, ring_idx); + + while(cnt) { + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->irq_data = irq_data; + + irq_data->tx.xdp_ring = ring; + + ring->depth = adapter->tx_ring_ctxt.depth; + ring->idx = xdp_idx; + set_ring_xdp(ring); + spin_lock_init(&ring->tx_lock); + + WRITE_ONCE(adapter->xdp_ring_ctxt.ring[xdp_idx], ring); + + cnt--; + xdp_idx++; + ring++; + } + + return; +} + +void sxe_rx_ring_init(struct sxe_adapter *adapter, u16 base, + u16 cnt, u16 ring_idx, u16 irq_idx) +{ + struct sxe_irq_data *irq_data = adapter->irq_ctxt.irq_data[irq_idx]; + struct sxe_ring *ring = 
&(irq_data->ring[base]); + u16 rxr_idx = ring_idx; + + LOG_INFO_BDF("irq_idx:%u rx_ring_cnt:%u base:%u ring_idx:%u.\n", + irq_idx, cnt, base, ring_idx); + + while(cnt) { + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->irq_data = irq_data; + + sxe_add_ring(ring, &irq_data->rx.list); + irq_data->rx.irq_rate.next_update = jiffies + 1; + + ring->depth = adapter->rx_ring_ctxt.depth; + ring->idx = rxr_idx; + + WRITE_ONCE(adapter->rx_ring_ctxt.ring[rxr_idx], ring); + cnt--; + rxr_idx += adapter->irq_ctxt.ring_irq_num; + ring++; + } + + return; +} + +void sxe_ring_stats_init(struct sxe_adapter *adapter) +{ + u32 i; + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + u64_stats_init(&adapter->rx_ring_ctxt.ring[i]->syncp); + } + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + u64_stats_init(&adapter->tx_ring_ctxt.ring[i]->syncp); + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + u64_stats_init(&adapter->xdp_ring_ctxt.ring[i]->syncp); + } + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ring.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ring.h new file mode 100644 index 000000000000..cc476b2d2726 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_ring.h @@ -0,0 +1,457 @@ + +#ifndef __SXE_RING_H__ +#define __SXE_RING_H__ + +#include "sxe_compat.h" +#include +#ifndef HAVE_NO_XDP_BUFF_RXQ +#include +#endif + +struct sxe_adapter; +struct sxe_irq_data; + +#define SXE_NON_RSS_RING_NUM 1 +#define MIN_QUEUES_IN_SRIOV 4 +#define SXE_RSS_RING_NUM_MAX 16 +#define SXE_TXRX_RING_NUM_MAX 64 +#define SXE_XDP_RING_NUM_MAX SXE_TXRX_RING_NUM_MAX +#define SXE_FNAV_RING_NUM_MAX (SXE_TXRX_RING_NUM_MAX - 1) + +#define SXE_NUM_PF_POOL_DEFAULT 1 + +#define SXE_PAGE_SIZE_4KB 4096 +#define SXE_PAGE_SIZE_8KB 8192 +#define SXE_PAGE_SIZE_64KB 65536 + +#define SXE_IP_HEAD_LEN_UNIT 4 + +#define SXE_TC_IDX2 2 +#define SXE_TC_IDX3 3 +#define SXE_TC_IDX5 5 +#define SXE_DCB_TC_MAX 8 +#define SXE_8_RING_PER_TC 8 +#define SXE_16_RING_PER_TC 16 +#define SXE_TX_RING_OFFSET_2 2 +#define SXE_TX_RING_OFFSET_4 4 +#define SXE_TX_RING_OFFSET_8 8 +#define SXE_TC1_TX_RING_SHIFT_6 6 +#define SXE_TC2_TX_RING_SHIFT_5 5 +#define SXE_TC4_TX_RING_SHIFT_4 4 +#define SXE_TC5_TX_RING_SHIFT_3 3 +#define SXE_8TC_RX_RING_SHIFT_4 4 +#define SXE_4TC_RX_RING_SHIFT_5 5 +#define SXE_MAX_QOS_IDX 7 + +#define SXE_DESC_CNT_MAX 4096 +#define SXE_DESC_CNT_MIN 64 +#define SXE_DEFAULT_DESC_CNT 512 +#define SXE_REQ_DESCRIPTOR_MULTIPLE 8 + +#define SXE_TX_WORK_LIMIT 256 + +#define SXE_RSS_16Q_MASK 0xF +#define SXE_RSS_8Q_MASK 0x7 +#define SXE_RSS_4Q_MASK 0x3 +#define SXE_RSS_2Q_MASK 0x1 +#define SXE_RSS_DISABLED_MASK 0x0 + +#define SXE_RXD_STAT_DD 0x01 +#define SXE_RXD_STAT_EOP 0x02 +#define SXE_RXD_STAT_FLM 0x04 +#define SXE_RXD_STAT_VP 0x08 +#define SXE_RXDADV_NEXTP_MASK 0x000FFFF0 +#define SXE_RXDADV_NEXTP_SHIFT 0x00000004 +#define SXE_RXD_STAT_UDPCS 0x10 +#define SXE_RXD_STAT_L4CS 0x20 +#define SXE_RXD_STAT_IPCS 0x40 +#define SXE_RXD_STAT_PIF 0x80 +#define SXE_RXD_STAT_CRCV 0x100 +#define SXE_RXD_STAT_OUTERIPCS 0x100 +#define SXE_RXD_STAT_VEXT 0x200 +#define SXE_RXD_STAT_UDPV 0x400 +#define SXE_RXD_STAT_DYNINT 0x800 +#define SXE_RXD_STAT_LLINT 0x800 +#define SXE_RXD_STAT_TSIP 0x08000 +#define SXE_RXD_STAT_TS 0x10000 +#define SXE_RXD_STAT_SECP 0x20000 +#define SXE_RXD_STAT_LB 0x40000 +#define SXE_RXD_STAT_ACK 0x8000 +#define SXE_RXD_ERR_CE 0x01 +#define SXE_RXD_ERR_LE 0x02 +#define SXE_RXD_ERR_PE 0x08 +#define SXE_RXD_ERR_OSE 0x10 +#define SXE_RXD_ERR_USE 0x20 +#define 
SXE_RXD_ERR_TCPE 0x40 +#define SXE_RXD_ERR_IPE 0x80 +#define SXE_RXDADV_ERR_MASK 0xfff00000 +#define SXE_RXDADV_ERR_SHIFT 20 +#define SXE_RXDADV_ERR_OUTERIPER 0x04000000 +#define SXE_RXDADV_ERR_FCEOFE 0x80000000 +#define SXE_RXDADV_ERR_FCERR 0x00700000 +#define SXE_RXDADV_ERR_FNAV_LEN 0x00100000 +#define SXE_RXDADV_ERR_FNAV_DROP 0x00200000 +#define SXE_RXDADV_ERR_FNAV_COLL 0x00400000 +#define SXE_RXDADV_ERR_HBO 0x00800000 +#define SXE_RXDADV_ERR_CE 0x01000000 +#define SXE_RXDADV_ERR_LE 0x02000000 +#define SXE_RXDADV_ERR_PE 0x08000000 +#define SXE_RXDADV_ERR_OSE 0x10000000 +#define SXE_RXDADV_ERR_IPSEC_INV_PROTOCOL 0x08000000 +#define SXE_RXDADV_ERR_IPSEC_INV_LENGTH 0x10000000 +#define SXE_RXDADV_ERR_IPSEC_AUTH_FAILED 0x18000000 +#define SXE_RXDADV_ERR_USE 0x20000000 +#define SXE_RXDADV_ERR_L4E 0x40000000 +#define SXE_RXDADV_ERR_IPE 0x80000000 +#define SXE_RXD_VLAN_ID_MASK 0x0FFF +#define SXE_RXD_PRI_MASK 0xE000 +#define SXE_RXD_PRI_SHIFT 13 +#define SXE_RXD_CFI_MASK 0x1000 +#define SXE_RXD_CFI_SHIFT 12 +#define SXE_RXDADV_LROCNT_MASK 0x001E0000 +#define SXE_RXDADV_LROCNT_SHIFT 17 +#define SXE_MAX_VLAN_IDX 4095 + +#define SXE_RXDADV_STAT_DD SXE_RXD_STAT_DD +#define SXE_RXDADV_STAT_EOP SXE_RXD_STAT_EOP +#define SXE_RXDADV_STAT_FLM SXE_RXD_STAT_FLM +#define SXE_RXDADV_STAT_VP SXE_RXD_STAT_VP +#define SXE_RXDADV_STAT_MASK 0x000fffff +#define SXE_RXDADV_STAT_TS 0x00010000 +#define SXE_RXDADV_STAT_SECP 0x00020000 + +#define SXE_RXDADV_RSSTYPE_NONE 0x00000000 +#define SXE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define SXE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define SXE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define SXE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define SXE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define SXE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define SXE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define SXE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define SXE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +#define SXE_RSS_L4_TYPES_MASK \ + ((1ul << SXE_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << SXE_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << SXE_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << SXE_RXDADV_RSSTYPE_IPV6_UDP)) + +#define SXE_RXDADV_PKTTYPE_NONE 0x00000000 +#define SXE_RXDADV_PKTTYPE_IPV4 0x00000010 +#define SXE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 +#define SXE_RXDADV_PKTTYPE_IPV6 0x00000040 +#define SXE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 +#define SXE_RXDADV_PKTTYPE_TCP 0x00000100 +#define SXE_RXDADV_PKTTYPE_UDP 0x00000200 +#define SXE_RXDADV_PKTTYPE_SCTP 0x00000400 +#define SXE_RXDADV_PKTTYPE_NFS 0x00000800 +#define SXE_RXDADV_PKTTYPE_VXLAN 0x00000800 +#define SXE_RXDADV_PKTTYPE_TUNNEL 0x00010000 +#define SXE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 +#define SXE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 +#define SXE_RXDADV_PKTTYPE_LINKSEC 0x00004000 +#define SXE_RXDADV_PKTTYPE_ETQF 0x00008000 +#define SXE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 +#define SXE_RXDADV_PKTTYPE_ETQF_SHIFT 4 + +#define SXE_HW_RING_IN_POOL(POOL_MASK) (__ALIGN_MASK(1, ~(POOL_MASK))) + +#define PF_POOL_INDEX(p) ((p) + adapter->pool_f.vf_num_used) +#define SXE_RX_DESC(R, i) \ + (&(((union sxe_rx_data_desc *)((R)->desc.base_addr))[i])) + +#define ring_is_lro_enabled(ring) \ + test_bit(SXE_RX_LRO_ENABLED, &(ring)->state) +#define set_ring_lro_enabled(ring) \ + set_bit(SXE_RX_LRO_ENABLED, &(ring)->state) +#define clear_ring_lro_enabled(ring) \ + clear_bit(SXE_RX_LRO_ENABLED, &(ring)->state) + +#define ring_is_xdp(ring) \ + test_bit(SXE_TX_XDP_RING, &(ring)->state) +#define set_ring_xdp(ring) \ + set_bit(SXE_TX_XDP_RING, &(ring)->state) +#define clear_ring_xdp(ring) \ + 
clear_bit(SXE_TX_XDP_RING, &(ring)->state) + +#define sxe_for_each_ring(post, head) \ + for (post = (head).next; post != NULL; post = post->next) + +#define SXE_TX_DESC(R, i) \ + (&(((union sxe_tx_data_desc *)((R)->desc.base_addr))[i])) +#define SXE_TX_CTXTDESC(R, i) \ + (&(((struct sxe_tx_context_desc *)((R)->desc.base_addr))[i])) + +#define SXE_TX_DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define SXE_TX_WAKE_THRESHOLD (SXE_TX_DESC_NEEDED * 2) + +#define SXE_TX_NON_DATA_DESC_NUM 3 +#define SXE_DATA_PER_DESC_SIZE_SHIFT 14 +#define SXE_DATA_PER_DESC_SIZE_MAX (1u << SXE_DATA_PER_DESC_SIZE_SHIFT) +#define SXE_TX_DESC_USE_COUNT(S) DIV_ROUND_UP((S), SXE_DATA_PER_DESC_SIZE_MAX) +#define SXE_TX_DESC_PREFETCH_THRESH_1 1 +#define SXE_TX_DESC_PREFETCH_THRESH_8 8 +#define SXE_TX_DESC_HOST_THRESH 1 +#define SXE_TX_DESC_WRITEBACK_THRESH 32 +#define SXE_MAX_TXRX_DESC_POLL 10 + +enum sxe_ring_state { + SXE_RX_3K_BUFFER, + SXE_RX_BUILD_SKB_ENABLED, + SXE_RX_LRO_ENABLED, + SXE_TX_FNAV_INIT_DONE, + SXE_TX_XPS_INIT_DONE, + SXE_TX_DETECT_HANG, + SXE_HANG_CHECK_ARMED, + SXE_TX_XDP_RING, + SXE_TX_DISABLED, +}; + +#define SXE_TX_HANG_CHECK_ACTIVE(ring) \ + set_bit(SXE_TX_DETECT_HANG, &(ring)->state) +#define SXE_TX_HANG_CHECK_COMPLETE(ring) \ + clear_bit(SXE_TX_DETECT_HANG, &(ring)->state) +#define SXE_DETECT_TX_HANG_NEED(ring) \ + test_bit(SXE_TX_DETECT_HANG, &(ring)->state) + +struct sxe_ring_stats { + u64 packets; + u64 bytes; +}; + +struct sxe_tx_ring_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; +}; + +struct sxe_rx_ring_stats { + u64 lro_count; + u64 lro_flush; + u64 non_eop_descs; + u64 alloc_rx_page; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_err; +}; + +struct sxe_ring_desc { + void *base_addr; + u8 __iomem *tail; + dma_addr_t dma; +}; + +struct sxe_ring { + struct sxe_ring *next; + struct sxe_irq_data *irq_data; + struct net_device *netdev; + struct device *dev; + + u8 idx; + u8 reg_idx; + u8 tc_idx; + u16 ring_idx; + + unsigned long state; + + u16 next_to_use; + u16 next_to_clean; + + u16 depth; + u32 size; + struct sxe_ring_desc desc; + + union { + struct sxe_tx_buffer *tx_buffer_info; + struct sxe_rx_buffer *rx_buffer_info; + }; + + union { + u16 next_to_alloc; + + struct { + u8 fnav_sample_rate; + u8 fnav_sample_count; + }; + }; + + unsigned long last_rx_timestamp; + + u16 rx_offset; + + struct bpf_prog *xdp_prog; +#ifndef HAVE_NO_XDP_BUFF_RXQ + struct xdp_rxq_info xdp_rxq; +#endif + spinlock_t tx_lock; + +#ifdef HAVE_AF_XDP_ZERO_COPY +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct zero_copy_allocator zca; +#endif +#ifndef HAVE_NETDEV_BPF_XSK_BUFF_POOL + struct xdp_umem *xsk_pool; +#else + struct xsk_buff_pool *xsk_pool; +#endif + u16 rx_buf_len; +#endif + struct sxe_ring_stats stats; + struct u64_stats_sync syncp; + union { + struct sxe_tx_ring_stats tx_stats; + struct sxe_rx_ring_stats rx_stats; + }; +} ____cacheline_internodealigned_in_smp; + +struct sxe_ring_context { + u16 num; + u16 depth; + struct sxe_ring *ring[SXE_TXRX_RING_NUM_MAX] ____cacheline_aligned_in_smp; +}; + +struct sxe_ring_feature { + u16 rss_limit; + union { + u16 tc_per_pool; + u16 ring_per_pool; + u16 ring_per_tc; + u16 rss; + }; + + u16 fnav_limit; + u16 fnav_num; +} ____cacheline_internodealigned_in_smp; + +struct sxe_pool_feature { + u16 total_num; + u16 pf_num_limit; + u16 pf_num_used; + u16 vf_num_used; +} ____cacheline_internodealigned_in_smp; + +union sxe_tx_data_desc { + struct { + __le64 buffer_addr; + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; + 
__le32 nxtseq_seed; + __le32 status; + } wb; +}; + +struct sxe_tx_context_desc { + __le32 vlan_macip_lens; + __le32 sa_idx; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +union sxe_rx_data_desc { + struct { + __le64 pkt_addr; + __le64 hdr_addr; + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length; + __le16 vlan; + } upper; + } wb; +}; +struct sxe_tx_buffer { + union sxe_tx_data_desc *next_to_watch; + unsigned long time_stamp; + union { + struct sk_buff *skb; +#ifdef HAVE_XDP_SUPPORT + struct xdp_frame *xdpf; +#endif + }; + u32 bytecount; + u16 gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_features; +}; + +struct sxe_rx_buffer { +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct sk_buff *skb; + dma_addr_t dma; +#endif + union { + struct { +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct sk_buff *skb; + dma_addr_t dma; +#endif + struct page *page; + u32 page_offset; + u16 pagecnt_bias; + }; +#ifdef HAVE_AF_XDP_ZERO_COPY + struct { +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + bool discard; + struct xdp_buff *xdp; +#else + void *addr; + u64 handle; +#endif + }; +#endif + }; +}; + +u16 sxe_rss_num_get(struct sxe_adapter *adapter); + +u16 sxe_rss_mask_get(struct sxe_adapter *adapter); + +u16 sxe_pool_mask_get(struct sxe_adapter *adapter); + +void sxe_ring_num_set(struct sxe_adapter *adapter); + +void sxe_ring_reg_map(struct sxe_adapter *adapter); + +void sxe_ring_feature_init(struct sxe_adapter *adapter); + +void sxe_ring_stats_init(struct sxe_adapter *adapter); + +static inline __le32 sxe_status_err_check(union sxe_rx_data_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +void sxe_tx_ring_init(struct sxe_adapter *adapter, u16 base, + u16 cnt, u16 ring_idx, u16 irq_idx); + +void sxe_xdp_ring_init(struct sxe_adapter *adapter, u16 base, + u16 cnt, u16 ring_idx, u16 irq_idx); + +void sxe_rx_ring_init(struct sxe_adapter *adapter, u16 base, + u16 cnt, u16 ring_idx, u16 irq_idx); +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_rx_proc.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_rx_proc.c new file mode 100644 index 000000000000..7f3a0d493c0b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_rx_proc.c @@ -0,0 +1,1900 @@ +#include +#include +#include + +#include "sxe.h" +#ifdef HAVE_NO_OVERFLOW_H +#include +#else +#include +#endif +#include "sxe_log.h" +#include "sxe_hw.h" +#include "sxe_rx_proc.h" +#include "sxe_netdev.h" +#include "sxe_pci.h" +#include "sxe_debug.h" +#include "sxe_csum.h" +#include "sxe_filter.h" +#include "sxe_ptp.h" +#include "sxe_ipsec.h" +#include "sxe_xdp.h" + +#ifdef SXE_DRIVER_TRACE +#include "sxe_trace.h" +#endif + +#ifdef NEED_SKB_FRAG_SIZE_API +#define skb_frag_size(frag) skb_frag_size_compat(frag) +static inline unsigned int skb_frag_size_compat(const skb_frag_t *frag) +{ + return frag->size; +} +#endif + +#ifdef NEED_SKB_FRAG_OFF_API +#define skb_frag_off(frag) skb_frag_off_compat(frag) +static inline unsigned int skb_frag_off_compat(const skb_frag_t *frag) +{ + return frag->page_offset; +} +#endif + +#ifdef NEED_SKB_FRAG_OFF_ADD_API +#define skb_frag_off_add(frag, delta) skb_frag_off_add_compat(frag, delta) +static inline void skb_frag_off_add_compat(skb_frag_t *frag, int delta) +{ + 
	frag->page_offset += delta;
+}
+#endif
+
+#ifdef XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS
+static inline int xdp_rxq_info_reg_compat(struct xdp_rxq_info *xdp_rxq,
+				struct net_device *dev,
+				u32 queue_index, unsigned int __always_unused napi_id)
+{
+	return xdp_rxq_info_reg(xdp_rxq, dev, queue_index);
+}
+
+#define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) \
+	xdp_rxq_info_reg_compat(xdp_rxq, dev, queue_index, napi_id)
+#endif
+
+#ifndef HAVE_NO_XDP_BUFF_RXQ
+static u32 sxe_rx_napi_id_get(struct sxe_ring *rx_ring)
+{
+	struct sxe_irq_data *data = rx_ring->irq_data;
+
+	return data ? data->napi.napi_id : 0;
+}
+#endif
+
+STATIC int sxe_rx_ring_alloc(struct sxe_adapter *adapter,
+				struct sxe_ring *ring)
+{
+	int ret;
+	u32 size;
+	s32 ring_node = NUMA_NO_NODE;
+	struct device *dev = ring->dev;
+	s32 dev_node = dev_to_node(dev);
+
+	size = sizeof(struct sxe_rx_buffer) * ring->depth;
+
+	if (ring->irq_data) {
+		ring_node = ring->irq_data->numa_node;
+	}
+
+	ring->rx_buffer_info = vmalloc_node(size, ring_node);
+	if (NULL == ring->rx_buffer_info) {
+		LOG_ERROR_BDF("ring[%u] can not alloc mem from local numa node[%d], "
+			"try remote\n", ring->idx, ring_node);
+		ring->rx_buffer_info = vmalloc(size);
+		if (NULL == ring->rx_buffer_info) {
+			LOG_ERROR_BDF("ring[%u] unable to allocate memory for the "
+				"rx_buffer_info\n", ring->idx);
+			ret = -ENOMEM;
+			goto l_rx_buf_alloc_failed;
+		}
+	}
+
+	ring->size = sizeof(union sxe_rx_data_desc) * ring->depth;
+	ring->size = ALIGN(ring->size, ALIGN_4K);
+
+	set_dev_node(dev, ring_node);
+	ring->desc.base_addr = dma_alloc_coherent(dev,
+				ring->size, &ring->desc.dma, GFP_KERNEL);
+	set_dev_node(dev, dev_node);
+	if (NULL == ring->desc.base_addr) {
+		LOG_ERROR_BDF("ring[%u] unable to alloc base_addr memory from the "
+			"same numa node[%d] with ring, try dev node\n",
+			ring->idx, ring_node);
+		ring->desc.base_addr = dma_alloc_coherent(dev, ring->size,
+					&ring->desc.dma, GFP_KERNEL);
+		if (NULL == ring->desc.base_addr) {
+			LOG_ERROR_BDF("ring[%u] unable to allocate memory for "
+				"the descriptor\n", ring->idx);
+			ret = -ENOMEM;
+			goto l_base_addr_alloc_failed;
+		}
+	}
+
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+
+#ifndef HAVE_NO_XDP_BUFF_RXQ
+	ret = xdp_rxq_info_reg(&ring->xdp_rxq, adapter->netdev,
+				ring->idx, sxe_rx_napi_id_get(ring));
+	if (ret < 0) {
+		goto l_xdp_rxq_reg_failed;
+	}
+#endif
+
+	ring->xdp_prog = adapter->xdp_prog;
+
+	return 0;
+
+#ifndef HAVE_NO_XDP_BUFF_RXQ
+l_xdp_rxq_reg_failed:
+	dma_free_coherent(ring->dev, ring->size,
+			ring->desc.base_addr, ring->desc.dma);
+	ring->desc.base_addr = NULL;
+#endif
+
+l_base_addr_alloc_failed:
+	vfree(ring->rx_buffer_info);
+	ring->rx_buffer_info = NULL;
+
+l_rx_buf_alloc_failed:
+	LOG_DEV_ERR("unable to allocate memory for the Rx descriptor ring\n");
+	return ret;
+}
+
+void sxe_rx_ring_buffer_clean(struct sxe_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	struct sxe_rx_buffer *rx_buffer = &ring->rx_buffer_info[ntc];
+	struct sk_buff *skb;
+#ifdef HAVE_DMA_ATTRS_STRUCT
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+	dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs);
+#endif
+
+	LOG_DEBUG("clean ring[%u] from ntc=%u, next_to_alloc=%u, rx_buffer[%p]\n",
+		ring->idx, ntc, ring->next_to_alloc, rx_buffer);
+#ifdef HAVE_AF_XDP_ZERO_COPY
+	if (ring->xsk_pool) {
+		sxe_xsk_rx_ring_clean(ring);
+		goto l_skip_free;
+	}
+#endif
+	while (ntc != ring->next_to_alloc) {
+		if (rx_buffer->skb) {
+			skb = rx_buffer->skb;
+			if (SXE_CTRL_BUFFER(skb)->page_released) {
dma_unmap_page_attrs(ring->dev, + SXE_CTRL_BUFFER(skb)->dma, + sxe_rx_pg_size(ring), + DMA_FROM_DEVICE, +#ifdef HAVE_DMA_ATTRS_STRUCT + &attrs); +#else + SXE_RX_DMA_ATTR); +#endif + } + dev_kfree_skb(skb); + } + + dma_sync_single_range_for_cpu(ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + sxe_rx_bufsz(ring), + DMA_FROM_DEVICE); + + dma_unmap_page_attrs(ring->dev, rx_buffer->dma, + sxe_rx_pg_size(ring), + DMA_FROM_DEVICE, +#ifdef HAVE_DMA_ATTRS_STRUCT + &attrs); +#else + SXE_RX_DMA_ATTR); +#endif + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + + ntc++; + rx_buffer++; + if (ntc == ring->depth) { + ntc = 0; + rx_buffer = ring->rx_buffer_info; + } + } +#ifdef HAVE_AF_XDP_ZERO_COPY +l_skip_free: +#endif + ring->next_to_alloc = 0; + ring->next_to_clean = 0; + ring->next_to_use = 0; + + return; +} + +void sxe_rx_ring_free(struct sxe_ring *ring) +{ + sxe_rx_ring_buffer_clean(ring); + + ring->xdp_prog = NULL; +#ifndef HAVE_NO_XDP_BUFF_RXQ + xdp_rxq_info_unreg(&ring->xdp_rxq); +#endif + + if (ring->rx_buffer_info) { + vfree(ring->rx_buffer_info); + ring->rx_buffer_info = NULL; + } + + if (ring->desc.base_addr) { + dma_free_coherent(ring->dev, ring->size, + ring->desc.base_addr, ring->desc.dma); + ring->desc.base_addr = NULL; + } + + return; +} + +static void sxe_rss_key_free(struct sxe_adapter *adapter) +{ + if (adapter->rss_key) { + kfree(adapter->rss_key); + adapter->rss_key = NULL; + } + + return; +} + +void sxe_rx_resources_free(struct sxe_adapter *adapter) +{ + u16 i; + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + if (adapter->rx_ring_ctxt.ring[i]->desc.base_addr) { + sxe_rx_ring_free(adapter->rx_ring_ctxt.ring[i]); + } + } + + sxe_rss_key_free(adapter); + + return; +} + +STATIC inline s32 sxe_rss_key_init(struct sxe_adapter *adapter) +{ + s32 ret; + u32 *rss_key; + + if (!adapter->rss_key) { + rss_key = kzalloc(SXE_RSS_KEY_SIZE, GFP_KERNEL); + if (unlikely(!rss_key)) { + ret = -ENOMEM; + goto l_ret; + } + + netdev_rss_key_fill(rss_key, SXE_RSS_KEY_SIZE); + adapter->rss_key = rss_key; + } + + return 0; +l_ret: + return ret; +} + +static int sxe_rx_resources_alloc(struct sxe_adapter *adapter) +{ + int ret; + u16 i; + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + ret = sxe_rx_ring_alloc(adapter, adapter->rx_ring_ctxt.ring[i]); + if (!ret) { + continue; + } + + LOG_MSG_ERR(probe, "allocation for Rx ring %u failed\n", i); + goto l_err; + } + + ret = sxe_rss_key_init(adapter); + if (ret) { + LOG_ERROR_BDF("sxe_rss_key_init failed, out of memory\n"); + goto l_err; + } + + return 0; + +l_err: + while (i--) { + sxe_rx_ring_free(adapter->rx_ring_ctxt.ring[i]); + } + + return ret; +} + +s32 sxe_rx_ring_depth_reset(struct sxe_adapter *adapter, u32 rx_cnt) +{ + s32 ret; + u32 i, rx_ring_cnt; + struct sxe_ring *temp_ring; + struct sxe_ring **rx_ring = adapter->rx_ring_ctxt.ring; + + rx_ring_cnt = adapter->rx_ring_ctxt.num; + temp_ring = vmalloc(array_size(rx_ring_cnt, sizeof(struct sxe_ring))); + if (!temp_ring) { + LOG_ERROR_BDF("vmalloc failed, size=%lu\n", + array_size(rx_ring_cnt, sizeof(struct sxe_ring))); + ret = -ENOMEM; + goto l_end; + } + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + memcpy(&temp_ring[i], rx_ring[i], sizeof(struct sxe_ring)); + +#ifndef HAVE_NO_XDP_BUFF_RXQ + memset(&temp_ring[i].xdp_rxq, 0, sizeof(temp_ring[i].xdp_rxq)); +#endif + temp_ring[i].depth = rx_cnt; + ret = sxe_rx_ring_alloc(adapter, &temp_ring[i]); + if (ret) { + LOG_ERROR_BDF("xdp ring alloc failed, rx ring idx=%d\n", i); + goto l_rx_free; + } + } + + for (i 
	= 0; i < adapter->rx_ring_ctxt.num; i++) {
+		sxe_rx_ring_free(rx_ring[i]);
+		memcpy(rx_ring[i], &temp_ring[i], sizeof(struct sxe_ring));
+	}
+
+	adapter->rx_ring_ctxt.depth = rx_cnt;
+	goto l_temp_free;
+
+l_rx_free:
+	while (i--) {
+		sxe_rx_ring_free(&temp_ring[i]);
+	}
+
+l_temp_free:
+	vfree(temp_ring);
+
+l_end:
+	return ret;
+}
+
+STATIC inline void sxe_redir_tbl_init(struct sxe_adapter *adapter)
+{
+	u32 i, j;
+	u32 tbl_entries = sxe_rss_redir_tbl_size_get();
+	u16 rss_num = sxe_rss_num_get(adapter);
+	struct sxe_hw *hw = &adapter->hw;
+
+	if ((adapter->cap & SXE_SRIOV_ENABLE) &&
+	    (rss_num < MIN_QUEUES_IN_SRIOV)) {
+		LOG_INFO_BDF("sriov enabled and rss ring num = %u too small, "
+			"adjust to %u\n", rss_num, MIN_QUEUES_IN_SRIOV);
+		rss_num = MIN_QUEUES_IN_SRIOV;
+	}
+
+	memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
+
+	for (i = 0, j = 0; i < tbl_entries; i++, j++) {
+		if (j == rss_num) {
+			j = 0;
+		}
+
+		adapter->rss_indir_tbl[i] = j;
+	}
+
+	hw->dbu.ops->rss_redir_tbl_set_all(hw, adapter->rss_indir_tbl);
+
+	return;
+}
+
+static inline void sxe_hw_rss_configure(struct sxe_adapter *adapter)
+{
+	u32 version = 0;
+	struct sxe_hw *hw = &adapter->hw;
+	u32 rss_num = sxe_rss_num_get(adapter);
+	u16 pool = adapter->pool_f.pf_num_used;
+
+	hw->dbu.ops->rss_key_set_all(hw, adapter->rss_key);
+
+	hw->filter.mac.ops->rx_udp_frag_checksum_disable(hw);
+
+	if (adapter->cap & SXE_RSS_FIELD_IPV4_UDP) {
+		version = SXE_RSS_IP_VER_4;
+	} else if (adapter->cap & SXE_RSS_FIELD_IPV6_UDP) {
+		version = SXE_RSS_IP_VER_6;
+	}
+	hw->dbu.ops->rss_hash_pkt_type_set(hw, version);
+
+	hw->dbu.ops->rss_rings_used_set(hw, rss_num, pool, PF_POOL_INDEX(0));
+
+	sxe_redir_tbl_init(adapter);
+
+	return;
+}
+
+static void sxe_lro_configure(struct sxe_adapter *adapter)
+{
+	struct sxe_hw *hw = &adapter->hw;
+	bool is_enable = !!(adapter->cap & SXE_LRO_ENABLE);
+
+	hw->dbu.ops->lro_ack_switch(hw, false);
+
+	hw->dma.ops->rx_dma_lro_ctl_set(hw);
+
+	if (is_enable) {
+		hw->filter.mac.ops->rx_nfs_filter_disable(hw);
+	}
+
+	hw->filter.mac.ops->rx_lro_enable(hw, is_enable);
+
+	return;
+}
+
+static u32 sxe_high_water_mark_get(struct sxe_adapter *adapter,
+					u8 pkt_buf_idx)
+{
+	struct sxe_hw *hw = &adapter->hw;
+	struct net_device *dev = adapter->netdev;
+	int link, tc, kb, marker;
+	u32 dv_id, rx_pba;
+	u32 rx_pkt_buf_size;
+
+	tc = link = dev->mtu + SXE_ETH_DEAD_LOAD + SXE_ETH_FRAMING;
+
+	dv_id = SXE_DV(link, tc);
+
+	if (adapter->cap & SXE_SRIOV_ENABLE) {
+		dv_id += SXE_B2BT(tc);
+	}
+
+	kb = SXE_BT2KB(dv_id);
+	rx_pkt_buf_size = hw->dbu.ops->rx_pkt_buf_size_get(hw, pkt_buf_idx);
+	rx_pba = rx_pkt_buf_size >> 10;
+
+	marker = rx_pba - kb;
+
+	if (marker < 0) {
+		LOG_MSG_WARN(drv, "packet buffer(%i) can not provide enough "
+			"headroom to support flow control. "
+ "decrease mtu or number of traffic classes\n", + pkt_buf_idx); + marker = tc + 1; + } + + return marker; +} + +static u32 sxe_low_water_mark_get(struct sxe_adapter *adapter, + u8 pkt_buf_idx) +{ + struct net_device *dev = adapter->netdev; + int tc; + u32 dv_id; + + tc = dev->mtu + SXE_ETH_DEAD_LOAD; + + dv_id = SXE_LOW_DV(tc); + + return SXE_BT2KB(dv_id); +} + +static void sxe_link_fc_water_mark_get(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + int num_tc = sxe_dcb_tc_get(adapter); + u32 i, high_mark, low_mark; + + if (!num_tc) { + num_tc = 1; + } + + for (i = 0; i < num_tc; i++) { + high_mark = sxe_high_water_mark_get(adapter, i); + low_mark = sxe_low_water_mark_get(adapter, i); + + hw->mac.ops->fc_tc_high_water_mark_set(hw, i, high_mark); + hw->mac.ops->fc_tc_low_water_mark_set(hw, i, low_mark); + + if (low_mark > high_mark) { + hw->mac.ops->fc_tc_low_water_mark_set(hw, i, 0); + } + } + + for (; i < MAX_TRAFFIC_CLASS; i++) { + hw->mac.ops->fc_tc_high_water_mark_set(hw, i, 0); + } + + return; +} + +static void sxe_hw_rx_buf_configure(struct sxe_adapter *adapter) +{ + u32 headroom; + struct sxe_hw *hw = &adapter->hw; + u8 tcs = sxe_dcb_tc_get(adapter); + + hw->dbu.ops->rx_pkt_buf_switch(hw, false); + + if (adapter->cap & SXE_FNAV_SAMPLE_ENABLE || + adapter->cap & SXE_FNAV_SPECIFIC_ENABLE) { + headroom = SXE_FNAV_RULES_TABLE_PKT_SIZE << adapter->fnav_ctxt.rules_table_size; + } else { + headroom = 0; + } + + LOG_DEBUG_BDF("config pkg buf: tcs=%u, headroom=%u\n", tcs, headroom); + hw->dbu.ops->rx_pkt_buf_size_configure(hw, tcs, headroom, PBA_STRATEGY_EQUAL); + + sxe_link_fc_water_mark_get(adapter); + + hw->dbu.ops->rx_pkt_buf_switch(hw, true); + + return; +} + +static void sxe_rx_hw_dma_ctrl_init(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + + hw->dbu.ops->rx_cap_switch_off(hw); + + hw->dma.ops->rx_dma_ctrl_init(hw); + return; +} + +static void sxe_ring_lro_configure(struct sxe_adapter *adapter, + struct sxe_ring *ring) +{ + clear_ring_lro_enabled(ring); + + if (adapter->cap & SXE_LRO_ENABLE) { + set_ring_lro_enabled(ring); + LOG_INFO_BDF("ring[%u] lro enable\n", ring->idx); + } + return; +} + +static void sxe_rx_buffer_size_set(struct sxe_adapter *adapter, + struct sxe_ring *ring) +{ +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC +#if (PAGE_SIZE < SXE_PAGE_SIZE_8KB) + int max_frame = sxe_sw_mtu_get(adapter); +#endif +#endif + + clear_bit(SXE_RX_3K_BUFFER, &ring->state); + clear_bit(SXE_RX_BUILD_SKB_ENABLED, &ring->state); + +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC + if (adapter->cap & SXE_RX_LEGACY) { + goto l_end; + } + + set_bit(SXE_RX_BUILD_SKB_ENABLED, &ring->state); + +#if (PAGE_SIZE < SXE_PAGE_SIZE_8KB) + if (adapter->cap & SXE_LRO_ENABLE) { + set_bit(SXE_RX_3K_BUFFER, &ring->state); + } + + if (SXE_2K_TOO_SMALL_WITH_PADDING || + (max_frame > (ETH_DATA_LEN + SXE_ETH_DEAD_LOAD))) { + set_bit(SXE_RX_3K_BUFFER, &ring->state); + } +#endif +#else + adapter->cap |= SXE_RX_LEGACY; +#endif + +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC +l_end: +#endif + return; +} + +static void sxe_rx_ring_lro_configure(struct sxe_adapter *adapter, + struct sxe_ring *ring) +{ + struct sxe_hw *hw = &adapter->hw; + u8 reg_idx = ring->reg_idx; + + if (!ring_is_lro_enabled(ring)) { + return; + } else { + hw->dma.ops->rx_lro_ctl_configure(hw, reg_idx, SXE_LROCTL_MAXDESC_16); + } + + return; +} + +STATIC bool sxe_mapped_page_alloc(struct sxe_ring *rx_ring, + struct sxe_rx_buffer *rx_buf) +{ + bool ret; + struct page *page = rx_buf->page; + dma_addr_t dma; + u16 order; +#ifdef 
HAVE_DMA_ATTRS_STRUCT + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + if (likely(page)) { + ret = true; + goto l_ret; + } + + order = sxe_rx_pg_order(rx_ring); + LOG_DEBUG("ring[%u] page order = %u\n", rx_ring->idx, order); + + page = dev_alloc_pages(order); + if (unlikely(!page)) { + LOG_DEBUG("ring[%u] page alloc failed\n", rx_ring->idx); + rx_ring->rx_stats.alloc_rx_page_failed++; + ret = false; + goto l_ret; + } + + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + sxe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#ifdef HAVE_DMA_ATTRS_STRUCT + &attrs); +#else + SXE_RX_DMA_ATTR); +#endif + + if (dma_mapping_error(rx_ring->dev, dma)) { + LOG_DEBUG("ring[%u] dma mapping failed\n", rx_ring->idx); + __free_pages(page, order); + + rx_ring->rx_stats.alloc_rx_page_failed++; + ret = false; + goto l_ret; + } + + rx_buf->dma = dma; + rx_buf->page = page; + rx_buf->page_offset = rx_ring->rx_offset; + page_ref_add(page, USHRT_MAX - 1); + rx_buf->pagecnt_bias = USHRT_MAX; + rx_ring->rx_stats.alloc_rx_page++; + + LOG_DEBUG("ring[%u] mapped alloc succeed:page=%p, dma=%llu, " + "page_offset=%u,ref_count=%d, pagecnt_bias=%u\n", + rx_ring->idx, page, rx_buf->dma, rx_buf->page_offset, + page_ref_count(page), rx_buf->pagecnt_bias); + + return true; + +l_ret: + return ret; +} + +void sxe_rx_ring_buffers_alloc(struct sxe_ring *rx_ring, u16 cleaned_count) +{ + union sxe_rx_data_desc *rx_desc; + struct sxe_rx_buffer *rx_buffer; + u16 ntu = rx_ring->next_to_use; + u16 bufsz; + + LOG_DEBUG("ring[%u][%p] entry, cleand_count=%u, next_to_use=%u\n", + rx_ring->idx, rx_ring, cleaned_count, ntu); + + if (!cleaned_count) { + return; + } + + rx_desc = SXE_RX_DESC(rx_ring, ntu); + rx_buffer = &rx_ring->rx_buffer_info[ntu]; + ntu -= rx_ring->depth; + + bufsz = sxe_rx_bufsz(rx_ring); + + do { + if (!sxe_mapped_page_alloc(rx_ring, rx_buffer)) { + LOG_DEBUG("ring[%u] page alloc failed, clean_count" + "left:%u\n",rx_ring->idx, cleaned_count); + break; + } + + dma_sync_single_range_for_device(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, bufsz, + DMA_FROM_DEVICE); + + rx_desc->read.pkt_addr = + cpu_to_le64(rx_buffer->dma + rx_buffer->page_offset); + + rx_desc++; + rx_buffer++; + ntu++; + if (unlikely(!ntu)) { + rx_desc = SXE_RX_DESC(rx_ring, 0); + rx_buffer = rx_ring->rx_buffer_info; + ntu -= rx_ring->depth; + } + + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + ntu += rx_ring->depth; + + if (rx_ring->next_to_use != ntu) { + rx_ring->next_to_use = ntu; + + rx_ring->next_to_alloc = ntu; + + wmb(); + writel(ntu, rx_ring->desc.tail); + } + + LOG_DEBUG("ring[%u] quit, next_to_use=%u\n", + rx_ring->idx, rx_ring->next_to_use); + + return; +} + +s32 sxe_rss_hash_conf_get(struct sxe_adapter *adapter, + struct sxe_rss_hash_config *rss_conf) +{ + struct sxe_hw *hw = &adapter->hw; + u8 *hash_key; + u32 rss_field; + u32 rss_key; + u64 rss_hf; + u16 i; + + hash_key = rss_conf->rss_key; + if (hash_key != NULL) { + for (i = 0; i < SXE_MAX_RSS_KEY_ENTRIES; i++) { + rss_key = sxe_hw_rss_key_get_by_idx(hw, i); + hash_key[(i * 4)] = rss_key & 0x000000FF; + hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF; + hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF; + hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF; + } + } + + rss_hf = 0; + rss_field = sxe_hw_rss_field_get(hw); + + if (rss_field & SXE_MRQC_RSS_FIELD_IPV4) { + rss_hf |= ETH_RSS_IPV4; + } + + if (rss_field & SXE_MRQC_RSS_FIELD_IPV4_TCP) { + rss_hf |= 
ETH_RSS_NONFRAG_IPV4_TCP; + } + + if (rss_field & SXE_MRQC_RSS_FIELD_IPV4_UDP) { + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + } + + if (rss_field & SXE_MRQC_RSS_FIELD_IPV6) { + rss_hf |= ETH_RSS_IPV6; + } + + if (rss_field & SXE_MRQC_RSS_FIELD_IPV6_TCP) { + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + } + + if (rss_field & SXE_MRQC_RSS_FIELD_IPV6_UDP) { + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + } + + LOG_DEBUG("got rss hash func=%lld\n", rss_hf); + rss_conf->rss_hf = rss_hf; + + return 0; +} + +#ifdef HAVE_AF_XDP_ZERO_COPY +static void sxe_hw_rx_buffer_configure(struct sxe_hw *hw, + u8 reg_idx, unsigned long state, + bool is_xsk_on, u32 xsk_buf_len) +#else +static void sxe_hw_rx_buffer_configure(struct sxe_hw *hw, + u8 reg_idx, unsigned long state) +#endif +{ + u32 pkg_buf_len; +#ifdef HAVE_AF_XDP_ZERO_COPY + if (is_xsk_on) { + pkg_buf_len = xsk_buf_len; + } else if (test_bit(SXE_RX_3K_BUFFER, &state)) { +#else + if (test_bit(SXE_RX_3K_BUFFER, &state)) { +#endif + pkg_buf_len = SXE_RXBUFFER_3K; + } else { + pkg_buf_len = SXE_RXBUFFER_2K; + } + + hw->dma.ops->rx_rcv_ctl_configure(hw, reg_idx, + SXE_RX_HDR_SIZE, pkg_buf_len); + + return; +} + +static void sxe_rx_ring_reg_configure(struct sxe_adapter *adapter, + struct sxe_ring *ring) +{ + struct sxe_hw *hw = &adapter->hw; + u64 desc_dma_addr = ring->desc.dma; + u8 reg_idx = ring->reg_idx; +#ifdef HAVE_AF_XDP_ZERO_COPY + u32 xsk_buf_len = 0; +#endif + u32 desc_mem_len; + + hw->dma.ops->rx_ring_switch(hw, reg_idx, false); + + desc_mem_len = ring->depth * sizeof(union sxe_rx_data_desc); + hw->dma.ops->rx_ring_desc_configure(hw, desc_mem_len, desc_dma_addr, reg_idx); + + ring->desc.tail = adapter->hw.reg_base_addr + SXE_RDT(reg_idx); + +#ifdef HAVE_AF_XDP_ZERO_COPY + if (ring->xsk_pool) { +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); +#else + xsk_buf_len = ring->xsk_pool->chunk_size_nohr - + XDP_PACKET_HEADROOM; +#endif + ring->rx_buf_len = xsk_buf_len; + } +#endif + +#ifdef HAVE_AF_XDP_ZERO_COPY + sxe_hw_rx_buffer_configure(hw, reg_idx, ring->state, + !!ring->xsk_pool, xsk_buf_len); +#else + sxe_hw_rx_buffer_configure(hw, reg_idx, ring->state); +#endif + + sxe_rx_ring_lro_configure(adapter, ring); + + hw->dma.ops->rx_desc_thresh_set(hw, reg_idx); + + hw->dma.ops->rx_ring_switch(hw, reg_idx, true); + + return; +} + +#ifdef HAVE_AF_XDP_ZERO_COPY +static inline void sxe_rx_ring_xdp_mem_mode_init( + struct sxe_adapter *adapter, + struct sxe_ring *ring) +{ + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + ring->xsk_pool = sxe_xsk_pool_get(adapter, ring); + if (ring->xsk_pool) { +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL)); + xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); +#else + ring->zca.free = sxe_zca_free; + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_ZERO_COPY, + &ring->zca)); +#endif + } else { + WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL)); + } + + return; +} +#endif +static inline void sxe_rx_desc_init(struct sxe_ring *rx_ring) +{ + union sxe_rx_data_desc *desc; + + desc = SXE_RX_DESC(rx_ring, 0); + desc->wb.upper.length = 0; + + return; +} + +static inline u32 sxe_rx_offset(struct sxe_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
SXE_SKB_PAD : 0; +} + +static inline void sxe_rx_offset_init(struct sxe_ring *rx_ring) +{ + rx_ring->rx_offset = sxe_rx_offset(rx_ring); + + return; +} + +void sxe_rx_ring_attr_configure(struct sxe_adapter *adapter, + struct sxe_ring *ring) +{ +#ifdef HAVE_AF_XDP_ZERO_COPY + sxe_rx_ring_xdp_mem_mode_init(adapter, ring); +#endif + + sxe_ring_lro_configure(adapter, ring); + + sxe_rx_buffer_size_set(adapter, ring); + + sxe_rx_ring_reg_configure(adapter, ring); + sxe_rx_ring_buffer_init(ring); + sxe_rx_desc_init(ring); + sxe_rx_offset_init(ring); +#ifdef HAVE_AF_XDP_ZERO_COPY + if (ring->xsk_pool) { + sxe_zc_rx_ring_buffers_alloc(ring, sxe_desc_unused(ring)); + } else { + sxe_rx_ring_buffers_alloc(ring, sxe_desc_unused(ring)); + } +#else + sxe_rx_ring_buffers_alloc(ring, sxe_desc_unused(ring)); +#endif + return; +} + +static void sxe_rx_ring_configure(struct sxe_adapter *adapter) +{ + u32 i; + u8 tcs = sxe_dcb_tc_get(adapter); + struct sxe_hw *hw = &adapter->hw; + u16 mask = sxe_pool_mask_get(adapter); + bool enable = !!(adapter->cap & SXE_SRIOV_ENABLE); + bool is_4Q; + struct sxe_ring **ring = adapter->rx_ring_ctxt.ring; + + if (mask == SXE_4Q_PER_POOL_MASK) { + is_4Q = true; + } else { + is_4Q = false; + } + + hw->dbu.ops->rx_multi_ring_configure(hw, tcs, is_4Q, enable); + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + sxe_rx_ring_attr_configure(adapter, ring[i]); + } + + return; +} + +static void sxe_vlan_restore(struct sxe_adapter *adapter) +{ + u16 vid = 1; + + sxe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit_from(vid, adapter->vlan_ctxt.active_vlans, VLAN_N_VID) + sxe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); + + return; +} + +static void sxe_fnav_configure(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + + if (adapter->cap & SXE_FNAV_SAMPLE_ENABLE) { + hash_init(adapter->fnav_ctxt.sample_list); + adapter->fnav_ctxt.is_sample_table_overflowed = false; + hw->dbu.ops->fnav_mode_init(hw, + adapter->fnav_ctxt.rules_table_size, + SXE_FNAV_SAMPLE_MODE); + } else if (adapter->cap & SXE_FNAV_SPECIFIC_ENABLE) { + hw->dbu.ops->fnav_mode_init(hw, + adapter->fnav_ctxt.rules_table_size, + SXE_FNAV_SPECIFIC_MODE); + sxe_fnav_rules_restore(adapter); + } + + return; +} + +void sxe_hw_rx_configure(struct sxe_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + sxe_fnav_configure(adapter); + + sxe_hw_rx_buf_configure(adapter); + + __sxe_set_rx_mode(netdev, true); + + sxe_vlan_restore(adapter); + + sxe_rx_hw_dma_ctrl_init(adapter); + + sxe_hw_rss_configure(adapter); + + sxe_lro_configure(adapter); + + sxe_rx_ring_configure(adapter); + + return; +} + +s32 sxe_test_rx_configure(struct sxe_adapter *adapter, struct sxe_ring *ring) +{ + s32 ret; + u32 reg_data; + union sxe_rx_data_desc *desc; + struct sxe_hw *hw = &adapter->hw; + + ring->depth = SXE_DEFAULT_DESC_CNT; + ring->idx = 0; + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->reg_idx = adapter->rx_ring_ctxt.ring[0]->reg_idx; + + ret = sxe_rx_ring_alloc(adapter, ring); + if (ret) { + goto l_end; + } + + sxe_hw_rx_buf_configure(adapter); + + hw->dbu.ops->rx_cap_switch_off(hw); + + desc = SXE_RX_DESC(ring, 0); + desc->wb.upper.length = 0; + + sxe_rx_buffer_size_set(adapter, ring); + + sxe_rx_ring_reg_configure(adapter, ring); + sxe_rx_ring_buffer_init(ring); + sxe_rx_ring_buffers_alloc(ring, sxe_desc_unused(ring)); + + reg_data = hw->filter.mac.ops->rx_mode_get(hw); + reg_data |= SXE_FCTRL_BAM | SXE_FCTRL_SBP | SXE_FCTRL_MPE; + 
hw->filter.mac.ops->rx_mode_set(hw, reg_data); + + hw->dbu.ops->rx_cap_switch_on(hw); + +l_end: + return ret; +} + +s32 sxe_rx_configure(struct sxe_adapter *adapter) +{ + s32 ret; + u16 queues; + struct net_device *netdev = adapter->netdev; + + ret = sxe_rx_resources_alloc(adapter); + if (ret) { + LOG_ERROR_BDF("allocation for Rx resources failed:Out of memory\n"); + goto l_ret; + } + + sxe_hw_rx_configure(adapter); + + queues = adapter->rx_ring_ctxt.num; + ret = netif_set_real_num_rx_queues(netdev, queues); + if (ret) { + goto l_err_clean; + } + + return 0; + +l_err_clean: + sxe_rx_release(adapter); + +l_ret: + return ret; +} + +static void sxe_rx_dma_sync(struct sxe_ring *rx_ring, + struct sk_buff *skb) +{ +#ifdef HAVE_DMA_ATTRS_STRUCT + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + if (ring_uses_build_skb(rx_ring)) { + unsigned long mask = (unsigned long)sxe_rx_pg_size(rx_ring) - 1; + unsigned long offset = (unsigned long)(skb->data) & mask; + + dma_sync_single_range_for_cpu(rx_ring->dev, + SXE_CTRL_BUFFER(skb)->dma, + offset, + skb_headlen(skb), + DMA_FROM_DEVICE); + } else { + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + SXE_CTRL_BUFFER(skb)->dma, + skb_frag_off(frag), + skb_frag_size(frag), + DMA_FROM_DEVICE); + } + + if (unlikely(SXE_CTRL_BUFFER(skb)->page_released)) { + dma_unmap_page_attrs(rx_ring->dev, SXE_CTRL_BUFFER(skb)->dma, + sxe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#ifdef HAVE_DMA_ATTRS_STRUCT + &attrs); +#else + SXE_RX_DMA_ATTR); +#endif + } + + return; +} + +static struct sxe_rx_buffer *sxe_rx_buffer_get(struct sxe_ring *rx_ring, + union sxe_rx_data_desc *rx_desc, + struct sk_buff **skb, + const u32 size, + s32 *rx_buffer_pgcnt) +{ + struct sxe_rx_buffer *rx_buffer; + bool is_end_pkt = !!sxe_status_err_check(rx_desc, SXE_RXD_STAT_EOP); + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + + *rx_buffer_pgcnt = +#if (PAGE_SIZE < SXE_PAGE_SIZE_8KB) + page_count(rx_buffer->page); +#else + 0; +#endif + + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; + + if (!is_end_pkt) { + if (!*skb) { + LOG_DEBUG("start a chain or lro rcv on ring[%u]\n", + rx_ring->idx); + goto l_skip_sync; + } + } else { + if (*skb) { + sxe_rx_dma_sync(rx_ring, *skb); + } + } + + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); +l_skip_sync: + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void sxe_add_rx_frag_to_skb(struct sxe_ring *rx_ring, + struct sxe_rx_buffer *rx_buffer, + struct sk_buff *skb, + u32 size) +{ +#if (PAGE_SIZE < SXE_PAGE_SIZE_8KB) + u32 truesize = sxe_rx_pg_size(rx_ring) / 2; +#else + u32 truesize = ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(rx_ring->rx_offset + size) : + SKB_DATA_ALIGN(size); +#endif + LOG_DEBUG("rx_ring[%u] add data in page[%p], offset[%u], size" + "to skb frag[%u],use truesize[%u]\n", + rx_ring->idx, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); +#if (PAGE_SIZE < SXE_PAGE_SIZE_8KB) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + return; + +} + +#ifndef HAVE_NET_PREFETCH_API +static inline void net_prefetch(void *data) +{ + prefetch(data); +#if L1_CACHE_BYTES < 128 + prefetch(data + L1_CACHE_BYTES); +#endif +} +#endif + +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC +STATIC struct sk_buff *sxe_skb_build(struct sxe_ring *rx_ring, + struct sxe_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union sxe_rx_data_desc *rx_desc) +{ +#ifdef HAVE_XDP_BUFF_DATA_META + u32 metasize = xdp->data - xdp->data_meta; +#endif + +#if (PAGE_SIZE < SXE_PAGE_SIZE_8KB) + u32 truesize = sxe_rx_pg_size(rx_ring) / 2; +#else + u32 truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + struct sk_buff *skb; + +#ifdef HAVE_XDP_BUFF_DATA_META + net_prefetch(xdp->data_meta); +#else + net_prefetch(xdp->data); +#endif + + skb = build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) { + goto l_ret; + } + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); + +#ifdef HAVE_XDP_BUFF_DATA_META + if (metasize) { + skb_metadata_set(skb, metasize); + } +#endif + + if (!sxe_status_err_check(rx_desc, SXE_RXD_STAT_EOP)) { + SXE_CTRL_BUFFER(skb)->dma = rx_buffer->dma; + } + +#if (PAGE_SIZE < SXE_PAGE_SIZE_8KB) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + +l_ret: + return skb; +} +#endif + +STATIC struct sk_buff *sxe_skb_construct(struct sxe_ring *rx_ring, + struct sxe_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union sxe_rx_data_desc *rx_desc) +{ + u32 size = xdp->data_end - xdp->data; +#if (PAGE_SIZE < SXE_PAGE_SIZE_8KB) + u32 truesize = sxe_rx_pg_size(rx_ring) / 2; +#else + u32 truesize = SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + struct sk_buff *skb; + + net_prefetch(xdp->data); + + + skb = napi_alloc_skb(&rx_ring->irq_data->napi, SXE_RX_HDR_SIZE); + if (unlikely(!skb)) { + goto l_ret; + } + + if (size > SXE_RX_HDR_SIZE) { + if (!sxe_status_err_check(rx_desc, SXE_RXD_STAT_EOP)) { + SXE_CTRL_BUFFER(skb)->dma = rx_buffer->dma; + } + + skb_add_rx_frag(skb, 0, rx_buffer->page, + xdp->data - page_address(rx_buffer->page), + size, truesize); +#if (PAGE_SIZE < SXE_PAGE_SIZE_8KB) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + memcpy(__skb_put(skb, size), + xdp->data, ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; + } + +l_ret: + return skb; +} + +#ifndef HAVE_DEV_PAGE_IS_REUSABLE_API +static inline bool dev_page_is_reusable_compat(struct page *page) +{ + return likely(page_to_nid(page) == numa_mem_id() && \ + !page_is_pfmemalloc(page)); +} +#define dev_page_is_reusable dev_page_is_reusable_compat +#endif +STATIC bool sxe_is_rx_page_can_reuse(struct sxe_rx_buffer *rx_buffer, + s32 rx_buffer_pgcnt) +{ + u16 pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + if (!dev_page_is_reusable(page)) { + LOG_INFO("page[%p] can not reuse since it is reserved page, " + "page_numa_id=%d, cpu_numa_id=%d, 
pfmemalloc:%s\n", + page, page_to_nid(page), numa_mem_id(), + page_is_pfmemalloc(page) ? "yes" : "no"); + goto l_false; + } + +#if (PAGE_SIZE < SXE_PAGE_SIZE_8KB) + if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) { + LOG_INFO("page[%p] can not reuse page_ref=%d --- bias=%d\n", + page, rx_buffer_pgcnt, pagecnt_bias); + goto l_false; + } +#else + +#define SXE_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - SXE_RXBUFFER_3K) + if (rx_buffer->page_offset > SXE_LAST_OFFSET) { + LOG_INFO("page[%p] can not reuse rx_buffer->page_offset:%u > %u\n", + page, rx_buffer->page_offset, SXE_LAST_OFFSET); + goto l_false; + } +#endif + + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; + +l_false: + return false; +} + +static void sxe_rx_page_reuse(struct sxe_ring *rx_ring, + struct sxe_rx_buffer *old_buff) +{ + struct sxe_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->depth) ? nta : 0; + + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; + + return; +} + +static inline void sxe_rx_page_release(struct sxe_ring *rx_ring, + struct sxe_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ +#ifdef HAVE_DMA_ATTRS_STRUCT + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + if (!IS_ERR(skb) && SXE_CTRL_BUFFER(skb)->dma == rx_buffer->dma) { + SXE_CTRL_BUFFER(skb)->page_released = true; + } else { + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + sxe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#ifdef HAVE_DMA_ATTRS_STRUCT + &attrs); +#else + SXE_RX_DMA_ATTR); +#endif + } + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + + return; +} + +static void sxe_rx_buffer_put(struct sxe_ring *rx_ring, + struct sxe_rx_buffer *rx_buffer, + struct sk_buff *skb, + s32 rx_buffer_pgcnt) +{ + if (sxe_is_rx_page_can_reuse(rx_buffer, rx_buffer_pgcnt)) { + sxe_rx_page_reuse(rx_ring, rx_buffer); + } else { + LOG_DEBUG("ring[%u], rx_buffer[%p]'s page[%p] can release\n", + rx_ring->idx, rx_buffer, rx_buffer->page); + sxe_rx_page_release(rx_ring, rx_buffer, skb); + } + + rx_buffer->page = NULL; + rx_buffer->skb = NULL; + + return; +} + +static bool sxe_is_non_eop(struct sxe_ring *rx_ring, + union sxe_rx_data_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->depth) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + LOG_DEBUG("next to clean:ntc[%u]\n", ntc); + + prefetch(SXE_RX_DESC(rx_ring, ntc)); + + if (ring_is_lro_enabled(rx_ring)) { + __le32 lro_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(SXE_RXDADV_LROCNT_MASK); + if (unlikely(lro_enabled)) { + u32 lro_cnt = le32_to_cpu(lro_enabled); + + lro_cnt >>= SXE_RXDADV_LROCNT_SHIFT; + SXE_CTRL_BUFFER(skb)->lro_cnt += lro_cnt - 1; + + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= SXE_RXDADV_NEXTP_MASK; + ntc >>= SXE_RXDADV_NEXTP_SHIFT; + LOG_DEBUG("enter lro static lro_cnt=%u, ntc =%u\n", + lro_cnt, ntc); + } + } + + if (likely(sxe_status_err_check(rx_desc, SXE_RXD_STAT_EOP))) { + LOG_DEBUG("the rx_desc[%p]'s last packet arrived\n", rx_desc); + goto l_false; + } + + rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + LOG_DEBUG("in chain mode, ntc[%u]\n", ntc); + + return true; + +l_false: + return false; +} + +#ifdef ETH_GET_HEADLEN_API_NEED_2_PARAM +static inline u32 +eth_get_headlen_compat(const struct net_device __always_unused *dev, void *data, + unsigned int len) +{ + return eth_get_headlen(data, len); +} + +#define eth_get_headlen(dev, data, len) eth_get_headlen_compat(dev, data, len) +#endif +static void sxe_tail_pull(struct sxe_ring *rx_ring, + struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + u32 pull_len; + + va = skb_frag_address(frag); + + pull_len = eth_get_headlen(skb->dev, va, SXE_RX_HDR_SIZE); + + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + + skb->data_len -= pull_len; + skb->tail += pull_len; + + return; +} + +bool sxe_headers_cleanup(struct sxe_ring *rx_ring, + union sxe_rx_data_desc *rx_desc, + struct sk_buff *skb) +{ + bool ret = false; + + if (IS_ERR(skb)) { + ret = true; + goto l_ret; + } + + if (!skb_headlen(skb)) { + LOG_DEBUG("ring[%u] place header in linear portion in skb\n", + rx_ring->idx); + sxe_tail_pull(rx_ring, skb); + } + + if (eth_skb_pad(skb)) { + ret = true; + goto l_ret; + } + +l_ret: + return ret; +} + +static void sxe_lro_gso_size_set(struct sxe_ring *ring, + struct sk_buff *skb) +{ + u16 hdr_len = skb_headlen(skb); + + skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), + SXE_CTRL_BUFFER(skb)->lro_cnt); + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + return; +} + +static void sxe_lro_stats_update(struct sxe_ring *rx_ring, + struct sk_buff *skb) +{ + if (!SXE_CTRL_BUFFER(skb)->lro_cnt) { + goto l_end; + } + + rx_ring->rx_stats.lro_count += SXE_CTRL_BUFFER(skb)->lro_cnt; + rx_ring->rx_stats.lro_flush++; + + LOG_DEBUG("ring[%u], lro_count=%llu, lro_flush=%llu\n", rx_ring->idx, + rx_ring->rx_stats.lro_count, rx_ring->rx_stats.lro_flush); + sxe_lro_gso_size_set(rx_ring, skb); + + SXE_CTRL_BUFFER(skb)->lro_cnt = 0; + +l_end: + return; +} + +static inline void sxe_rx_hash_set(struct sxe_ring *ring, + union sxe_rx_data_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) { + goto l_end; + } + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + SXE_RXDADV_RSSTYPE_MASK; + if (!rss_type) { + goto l_end; + } + LOG_DEBUG("rss_type:%u, rss hash value:%u\n", rss_type, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss)); + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (SXE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
+ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +l_end: + return; +} + +void sxe_skb_fields_process(struct sxe_ring *rx_ring, + union sxe_rx_data_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + u32 cap = rx_ring->irq_data->adapter->cap; + + sxe_rx_hash_set(rx_ring, rx_desc, skb); + + sxe_lro_stats_update(rx_ring, skb); + + sxe_rx_csum_verify(rx_ring, rx_desc, skb); + + if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && + sxe_status_err_check(rx_desc, SXE_RXD_STAT_VP)) { + u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); + LOG_DEBUG("rx vlan id=%u\n", vid); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + if (unlikely(cap & SXE_RX_HWTSTAMP_ENABLED)) { + sxe_ptp_rx_hwtstamp_process(rx_ring, rx_desc, skb); + } + +#ifdef SXE_IPSEC_CONFIGURE + sxe_rx_ipsec_proc(rx_ring, rx_desc, skb); +#endif + + if (netif_is_sxe(dev)) { + skb_record_rx_queue(skb, rx_ring->idx); + } else { + macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, + false); + } + + skb->protocol = eth_type_trans(skb, dev); + + return; +} + +void sxe_rx_skb_deliver(struct sxe_irq_data *irq_data, + struct sk_buff *skb) +{ + napi_gro_receive(&irq_data->napi, skb); + return; +} + +static inline u32 sxe_rx_frame_truesize(struct sxe_ring *rx_ring, u32 size) +{ + u32 truesize; + +#if (PAGE_SIZE < 8192) + truesize = sxe_rx_pg_size(rx_ring) / 2; +#else + truesize = rx_ring->rx_offset ? + SKB_DATA_ALIGN(rx_ring->rx_offset + size) +#ifdef HAVE_XDP_BUFF_FRAME_SIZE + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#endif + : SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +void sxe_rx_buffer_page_offset_update(struct sxe_ring *rx_ring, + struct sxe_rx_buffer *rx_buffer, + u32 size) +{ + u32 truesize = sxe_rx_frame_truesize(rx_ring, size); + +#if (PAGE_SIZE < SXE_PAGE_SIZE_8KB) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return; +} + +u32 sxe_rx_ring_irq_clean(struct sxe_irq_data *irq_data, + struct sxe_ring *rx_ring, + const u32 budget) +{ + u32 total_rx_bytes = 0; + u32 total_rx_packets = 0; + u32 rx_offset = rx_ring->rx_offset; + struct sxe_adapter *adapter = irq_data->adapter; + u16 cleaned_count = sxe_desc_unused(rx_ring); + u32 xdp_xmit = 0; + struct xdp_buff xdp; + struct sxe_ring_stats stats; + +#ifdef HAVE_XDP_BUFF_FRAME_SIZE + u32 frame_sz = 0; +#if (PAGE_SIZE < SXE_PAGE_SIZE_8KB) + frame_sz = sxe_rx_frame_truesize(rx_ring, 0); +#endif + +#ifdef HAVE_XDP_BUFF_INIT_API + xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); +#else + xdp.frame_sz = frame_sz; +#endif +#endif + +#ifndef HAVE_NO_XDP_BUFF_RXQ + xdp.rxq = &rx_ring->xdp_rxq; +#endif + + LOG_DEBUG_BDF("entry rx irq clean:irq=%u, ring_idx=%u, ring_reg_idx=%u, " + "ring_tc_idx=%u, next_to_clean=%u, next_to_use=%u, budget=%u\n", + irq_data->irq_idx, rx_ring->idx, rx_ring->reg_idx, rx_ring->tc_idx, + rx_ring->next_to_clean, rx_ring->next_to_use, budget); + + while (likely(total_rx_packets < budget)) { + s32 rx_buffer_pgcnt; + union sxe_rx_data_desc *rx_desc; + struct sxe_rx_buffer *rx_buffer; + struct sk_buff *skb; + u32 size; + + if (cleaned_count >= SXE_RX_BUFFER_WRITE) { + sxe_rx_ring_buffers_alloc(rx_ring, cleaned_count); + cleaned_count = 0; + } + +#ifdef SXE_DRIVER_TRACE + SXE_TRACE_RX(rx_ring->idx, SXE_TRACE_LAB_RX_START); +#endif + + rx_desc = SXE_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) { + break; + } + + LOG_DEBUG_BDF("process rx_desc[%u], write back info:" + "status_error=0x%x, length=%u, vlan=%u\n", + 
rx_ring->next_to_clean, + le16_to_cpu(rx_desc->wb.upper.status_error), + le16_to_cpu(rx_desc->wb.upper.length), + le16_to_cpu(rx_desc->wb.upper.vlan)); + + dma_rmb(); + + rx_buffer = sxe_rx_buffer_get(rx_ring, rx_desc, &skb, + size, &rx_buffer_pgcnt); + + if (!skb) { +#ifdef HAVE_XDP_PREPARE_BUFF_API + u8 *hard_start = page_address(rx_buffer->page) + + rx_buffer->page_offset - rx_offset; + xdp_prepare_buff(&xdp, hard_start, rx_offset, size, true); +#else + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; +#ifdef HAVE_XDP_BUFF_DATA_META + xdp.data_meta = xdp.data; +#endif + xdp.data_hard_start = xdp.data - rx_offset; + xdp.data_end = xdp.data + size; +#endif + +#ifdef HAVE_XDP_BUFF_FRAME_SIZE +#if (PAGE_SIZE > SXE_PAGE_SIZE_4KB) + xdp.frame_sz = sxe_rx_frame_truesize(rx_ring, size); +#endif +#endif + skb = sxe_xdp_run(adapter, rx_ring, &xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = - PTR_ERR(skb); + LOG_DEBUG("get in xdp process\n"); + + if (xdp_res & (SXE_XDP_TX | SXE_XDP_REDIR)) { + xdp_xmit |= xdp_res; + sxe_rx_buffer_page_offset_update(rx_ring, + rx_buffer, size); + } else { + rx_buffer->pagecnt_bias++; + } + total_rx_packets++; + total_rx_bytes += size; + } else if (skb) { + sxe_add_rx_frag_to_skb(rx_ring, rx_buffer, skb, size); +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC + } else if (ring_uses_build_skb(rx_ring)) { + skb = sxe_skb_build(rx_ring, rx_buffer, + &xdp, rx_desc); +#endif + } else { + skb = sxe_skb_construct(rx_ring, rx_buffer, + &xdp, rx_desc); + } + + if (!skb) { + LOG_INFO("skb is NULL, failed to process\n"); + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + sxe_rx_buffer_put(rx_ring, rx_buffer, skb, rx_buffer_pgcnt); + cleaned_count++; + +#ifdef SXE_DRIVER_TRACE + SXE_TRACE_RX(rx_ring->idx, SXE_TRACE_LAB_RX_END); +#endif + + if (sxe_is_non_eop(rx_ring, rx_desc, skb)) { + continue; + } + + if (sxe_headers_cleanup(rx_ring, rx_desc, skb)) { + continue; + } + + total_rx_bytes += skb->len; + + SKB_DUMP(skb); + + sxe_skb_fields_process(rx_ring, rx_desc, skb); + + LOG_DEBUG("in loop[%u], process total bytes:%u\n", + total_rx_packets + 1, skb->len); + sxe_rx_skb_deliver(irq_data, skb); + + total_rx_packets++; + } + +#ifdef HAVE_XDP_SUPPORT + if (xdp_xmit & SXE_XDP_REDIR) { + xdp_do_flush_map(); + } + + if (xdp_xmit & SXE_XDP_TX) { + struct sxe_ring *ring = sxe_xdp_tx_ring_pick(adapter); + sxe_xdp_ring_tail_update_locked(ring); + } +#endif + + stats.packets = total_rx_packets; + stats.bytes = total_rx_bytes; + sxe_rx_pkt_stats_update(rx_ring, &irq_data->rx.irq_rate, &stats); + + return total_rx_packets; +} + +void sxe_hw_rx_disable(struct sxe_adapter *adapter) +{ + u8 reg_idx; + u32 i, wait_loop, rxdctl; + struct sxe_ring *ring; + unsigned long wait_delay, delay_interval; + struct sxe_hw *hw = &adapter->hw; + + hw->dbu.ops->rx_cap_switch_off(hw); + + if (sxe_is_hw_fault(hw)) { + goto l_end; + } + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + ring = adapter->rx_ring_ctxt.ring[i]; + reg_idx = ring->reg_idx; + + hw->dma.ops->rx_ring_switch_not_polling(hw, reg_idx, false); + } + + delay_interval = sxe_pcie_timeout_poll(adapter->pdev, hw); + wait_delay = delay_interval; + + wait_loop = SXE_MAX_TXRX_DESC_POLL; + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + rxdctl = 0; + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + ring = adapter->rx_ring_ctxt.ring[i]; + reg_idx = ring->reg_idx; + + rxdctl |= hw->dma.ops->rx_desc_ctrl_get(hw, reg_idx); + } + + 
if (!(rxdctl & SXE_RXDCTL_ENABLE)) { + goto l_end; + } + } + + LOG_MSG_ERR(drv, "RXDCTL.ENABLE for one or more queues not cleared within the polling period\n"); + +l_end: + return; + +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_rx_proc.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_rx_proc.h new file mode 100644 index 000000000000..091b09b823db --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_rx_proc.h @@ -0,0 +1,240 @@ + +#ifndef __SXE_RX_H__ +#define __SXE_RX_H__ + +#include "sxe.h" + +#define ALIGN_4K (4096) +#define SXE_RX_BUFFER_WRITE (16) + +#define SXE_RXBUFFER_256 (256) +#define SXE_RXBUFFER_1536 (1536) +#define SXE_RXBUFFER_2K (2048) +#define SXE_RXBUFFER_3K (3072) +#define SXE_RXBUFFER_4K (4096) +#define SXE_MAX_RXBUFFER (16384) + +#define SXE_RX_HDR_SIZE SXE_RXBUFFER_256 +#define SXE_MIN_LRO_ITR (24) +#define SXE_RXDADV_RSSTYPE_MASK (0x0000000F) + +#define SXE_ETH_FRAMING (20) + +#define ETH_RSS_IPV4 (1ULL << 2) +#define ETH_RSS_NONFRAG_IPV4_TCP (1ULL << 4) +#define ETH_RSS_NONFRAG_IPV4_UDP (1ULL << 5) +#define ETH_RSS_IPV6 (1ULL << 8) +#define ETH_RSS_NONFRAG_IPV6_TCP (1ULL << 10) +#define ETH_RSS_NONFRAG_IPV6_UDP (1ULL << 11) + + +#define SXE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +#define SXE_B2BT(BT) (BT * 8) + +#define SXE_PFC_D 672 + +#define SXE_CABLE_DC 5556 +#define SXE_CABLE_DO 5000 + +#define SXE_PHY_D 12800 +#define SXE_MAC_D 4096 +#define SXE_XAUI_D (2 * 1024) + +#define SXE_ID (SXE_MAC_D + SXE_XAUI_D + SXE_PHY_D) + +#define SXE_HD 6144 + +#define SXE_PCI_DELAY 10000 + +#define SXE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (SXE_B2BT(_max_frame_link) + \ + SXE_PFC_D + \ + (2 * SXE_CABLE_DC) + \ + (2 * SXE_ID) + \ + SXE_HD) / 25 + 1) + \ + 2 * SXE_B2BT(_max_frame_tc)) + +#define SXE_LOW_DV(_max_frame_tc) \ + (2 * (2 * SXE_B2BT(_max_frame_tc) + \ + (36 * SXE_PCI_DELAY / 25) + 1)) + +struct sxe_skb_ctrl_buffer { + union { + struct sk_buff *head; + struct sk_buff *tail; + }; + + dma_addr_t dma; + u16 lro_cnt; + bool page_released; +}; + +struct sxe_rss_hash_config { + u8 *rss_key; + u8 rss_key_len; + u64 rss_hf; +}; + +#define SXE_CTRL_BUFFER(skb) ((struct sxe_skb_ctrl_buffer *)(skb)->cb) + +#ifdef HAVE_DMA_ATTRS_STRUCT +#define SXE_RX_DMA_ATTR NULL +#else +#define SXE_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif + +#define ring_uses_build_skb(ring) \ + test_bit(SXE_RX_BUILD_SKB_ENABLED, &(ring)->state) + +#if (PAGE_SIZE < 8192) +#define SXE_MAX_2K_FRAME_BUILD_SKB (SXE_RXBUFFER_1536 - NET_IP_ALIGN) +#define SXE_2K_TOO_SMALL_WITH_PADDING \ + ((NET_SKB_PAD + SXE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(SXE_RXBUFFER_2K)) + +static inline u32 sxe_compute_pad(u32 rx_buf_len) +{ + u32 page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline u32 sxe_skb_pad(void) +{ + u32 rx_buf_len; + + if (SXE_2K_TOO_SMALL_WITH_PADDING) { + rx_buf_len = SXE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); + } else { + rx_buf_len = SXE_RXBUFFER_1536; + } + + rx_buf_len -= NET_IP_ALIGN; + + return sxe_compute_pad(rx_buf_len); +} + +#define SXE_SKB_PAD sxe_skb_pad() +#else +#define SXE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + +static inline u16 sxe_rx_pg_order(struct sxe_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (test_bit(SXE_RX_3K_BUFFER, &ring->state)) { + return 1; + } + +#endif + return 0; +} + +#define sxe_rx_pg_size(_ring) (PAGE_SIZE << sxe_rx_pg_order(_ring)) + +s32 sxe_rss_hash_conf_get(struct sxe_adapter 
*adapter, + struct sxe_rss_hash_config *rss_conf); + +s32 sxe_rx_configure(struct sxe_adapter *adapter); + +void sxe_rx_ring_free(struct sxe_ring *ring); + +void sxe_rx_resources_free(struct sxe_adapter *adapter); + +s32 sxe_rx_ring_depth_reset(struct sxe_adapter *adapter, u32 rx_cnt); + +void sxe_rx_ring_buffer_clean(struct sxe_ring *ring); + +u32 sxe_rx_ring_irq_clean(struct sxe_irq_data *q_vector, + struct sxe_ring *rx_ring, + const u32 budget); + +void sxe_hw_rx_disable(struct sxe_adapter *adapter); + +void sxe_hw_rx_configure(struct sxe_adapter *adapter); + +void sxe_skb_fields_process(struct sxe_ring *rx_ring, + union sxe_rx_data_desc *rx_desc, + struct sk_buff *skb); + +void sxe_rx_skb_deliver(struct sxe_irq_data *irq_data, + struct sk_buff *skb); + +void sxe_rx_ring_attr_configure(struct sxe_adapter *adapter, + struct sxe_ring *ring); + +static inline void sxe_rx_ring_buffer_init(struct sxe_ring *rx_ring) +{ + memset(rx_ring->rx_buffer_info, 0, + sizeof(struct sxe_rx_buffer) * rx_ring->depth); + return; +} + +static inline void sxe_rx_pkt_stats_update(struct sxe_ring *rx_ring, + struct sxe_irq_rate *irq_rate, + struct sxe_ring_stats *stats) +{ + LOG_DEBUG("in the irq, process total packets[%llu], bytes[%llu]\n", + stats->packets, stats->bytes); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += stats->packets; + rx_ring->stats.bytes += stats->bytes; + u64_stats_update_end(&rx_ring->syncp); + + irq_rate->total_packets += stats->packets; + irq_rate->total_bytes += stats->bytes; + + return; +} + +void sxe_rx_ring_buffers_alloc(struct sxe_ring *rx_ring, u16 cleaned_count); + +s32 sxe_test_rx_configure(struct sxe_adapter *adapter, struct sxe_ring *ring); + +static inline u16 sxe_desc_unused(struct sxe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->depth) + ntc - ntu - 1; +} + +static inline u32 sxe_rx_bufsz(struct sxe_ring *ring) +{ + u32 bufsz; + + if (test_bit(SXE_RX_3K_BUFFER, &ring->state)) { + bufsz = SXE_RXBUFFER_3K; + goto l_ret; + } + +#if (PAGE_SIZE < 8192) + if (ring_uses_build_skb(ring)) { + bufsz = SXE_MAX_2K_FRAME_BUILD_SKB; + goto l_ret; + } + +#endif + bufsz = SXE_RXBUFFER_2K; + +l_ret: + return bufsz; +} + +static inline void sxe_rx_release(struct sxe_adapter *adapter) +{ + sxe_rx_resources_free(adapter); + + return; +} + +static inline u32 sxe_rss_redir_tbl_size_get(void) +{ + return SXE_MAX_RETA_ENTRIES; +} +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_sriov.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_sriov.c new file mode 100644 index 000000000000..068d7bb8c599 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_sriov.c @@ -0,0 +1,2283 @@ + +#include +#include +#include + +#include "sxe.h" +#include "sxe_hw.h" +#include "sxe_sriov.h" +#include "sxe_filter.h" +#include "sxe_netdev.h" +#include "sxe_rx_proc.h" +#include "sxe_ipsec.h" +#include "sxe_dcb.h" +#include "sxe_pci.h" +#include "sxe_ipsec.h" +#include "sxe_ring.h" + +#ifdef CONFIG_PCI_IOV +static unsigned int max_vfs; +#ifndef SXE_TEST +module_param(max_vfs, uint, S_IRUGO | S_IWUSR); + +MODULE_PARM_DESC(max_vfs, + "Max number of vf per physical function - default is zero and maximum value is 63. 
(Deprecated)"); +#endif +#endif + +static s32 sxe_vf_uc_addr_sync(struct sxe_adapter *adapter, u8 vf_idx, + u16 index, u8 *mac_addr); + +STATIC s32 sxe_vlvf_entry_find(struct sxe_hw *hw, u32 vlan) +{ + u32 vlvf; + s32 idx = 0; + struct sxe_adapter *adapter = hw->adapter; + + if (vlan == 0) { + LOG_DEBUG_BDF("vlan:0 use default idx:0\n"); + goto l_out; + } + + for (idx = SXE_VLVF_ENTRIES; --idx;) { + vlvf = hw->filter.vlan.ops->pool_filter_read(hw, idx); + if ((vlvf & VLAN_VID_MASK) == vlan) { + break; + } + } + + LOG_DEBUG_BDF("found vlan[%u] in idx[%u]\n", vlan, idx); + +l_out: + return idx; +} + +static void sxe_pf_promisc_vlvf_update(struct sxe_adapter *adapter, u32 vid) +{ + struct sxe_hw *hw = &adapter->hw; + + u32 bits, word; + int idx; + + idx = sxe_vlvf_entry_find(hw, vid); + if (!idx) { + goto l_end; + } + + word = idx * 2 + (PF_POOL_INDEX(0) / VF_BLOCK_BITS); + bits = ~BIT(PF_POOL_INDEX(0) % VF_BLOCK_BITS); + bits &= hw->filter.vlan.ops->pool_filter_bitmap_read(hw, word); + + LOG_DEBUG_BDF("in vlan idx[%u] vlvfb[%u] bits[%u]\n", idx, word, bits); + if (!bits && !hw->filter.vlan.ops->pool_filter_bitmap_read(hw, word ^ 1)) { + if (!(adapter->cap & SXE_VLAN_PROMISC)) { + hw->filter.vlan.ops->pool_filter_bitmap_write(hw, word, 0); + } + + hw->filter.vlan.ops->pool_filter_write(hw, idx, 0); + } + +l_end: + return; +} + +static s32 sxe_vf_vlan_configure(struct sxe_adapter *adapter, bool add, int vid, + u32 vf) +{ + struct sxe_hw *hw = &adapter->hw; + s32 err; + + LOG_DEBUG_BDF("vf[%u] vid[%u] add = %s\n", vf, vid, add ? "true" : "false"); + if (add && test_bit(vid, adapter->vlan_ctxt.active_vlans)) { + err = hw->filter.vlan.ops->filter_configure(hw, vid, + PF_POOL_INDEX(0), true, false); + if (err) { + LOG_ERROR_BDF("vid[%u] has pf monitoring and" + "alloc vlvf failed\n", vid); + goto l_ret; + } + } + + err = hw->filter.vlan.ops->filter_configure(hw, vid, vf, add, false); + + if (add && !err) { + LOG_DEBUG_BDF("vf[%u] config vid[%u] success\n", vf, vid); + goto l_ret; + } + + if (test_bit(vid, adapter->vlan_ctxt.active_vlans) || + (adapter->cap & SXE_VLAN_PROMISC)) { + sxe_pf_promisc_vlvf_update(adapter, vid); + } + +l_ret: + return err; +} + +static s32 sxe_port_vlan_disable(struct sxe_adapter *adapter, int vf) +{ + struct sxe_hw *hw = &adapter->hw; + s32 ret; + + ret = sxe_vf_vlan_configure(adapter, false, + adapter->vt_ctxt.vf_info[vf].pf_vlan, vf); + + sxe_vf_vlan_configure(adapter, true, 0, vf); + hw->dma.ops->tx_vlan_tag_clear(hw, vf); + hw->filter.vlan.ops->untagged_pkts_rcv_switch(hw, vf, true); + + adapter->vt_ctxt.vf_info[vf].pf_vlan = 0; + adapter->vt_ctxt.vf_info[vf].pf_qos = 0; + + return ret; +} + +static s32 sxe_port_vlan_enable(struct sxe_adapter *adapter, int vf, + u16 vlan, u8 qos) +{ + struct sxe_hw *hw = &adapter->hw; + s32 ret; + + ret = sxe_vf_vlan_configure(adapter, true, vlan, vf); + if (ret) { + goto out; + } + + sxe_vf_vlan_configure(adapter, false, 0, vf); + + hw->dma.ops->tx_vlan_tag_set(hw, vlan, qos, vf); + hw->filter.vlan.ops->untagged_pkts_rcv_switch(hw, vf, false); + + adapter->vt_ctxt.vf_info[vf].pf_vlan = vlan; + adapter->vt_ctxt.vf_info[vf].pf_qos = qos; + LOG_DEV_INFO("setting vlan %d, qos 0x%x on vf %d\n", vlan, qos, vf); + + if (test_bit(SXE_DOWN, &adapter->state)) { + LOG_DEV_WARN("the vf vlan has been set, but the pf device is not up.\n"); + LOG_DEV_WARN("bring the pf device up before attempting to use the vf device.\n"); + } + +out: + return ret; +} + +static void sxe_vf_rate_factor_set(struct sxe_adapter *adapter, u8 vf_idx) +{ + struct sxe_hw 
*hw = &adapter->hw; + u16 tx_rate = adapter->vt_ctxt.vf_info[vf_idx].tx_rate; + u8 pool_mask = sxe_pool_mask_get(adapter); + u8 ring_per_pool = __ALIGN_MASK(1, ~pool_mask); + u32 value = 0; + u8 idx; + + if (tx_rate) { + value = sxe_mbps_link_speed_get(adapter->link.speed); + + value <<= SXE_RTTBCNRC_RF_INT_SHIFT; + value /= tx_rate; + + value &= SXE_RTTBCNRC_RF_INT_MASK | + SXE_RTTBCNRC_RF_DEC_MASK; + + value |= SXE_RTTBCNRC_RS_ENA; + } + + hw->dma.ops->max_dcb_memory_window_set(hw, SXE_DCB_MMW_SIZE_DEFAULT); + + for (idx = 0; idx < ring_per_pool; idx++) { + u32 reg_idx = (vf_idx * ring_per_pool) + idx; + + hw->dma.ops->dcb_tx_ring_rate_factor_set(hw, reg_idx, value); + } + + return; +} + +void sxe_vf_rate_update(struct sxe_adapter *adapter) +{ + u8 i; + unsigned long flags; + + if (!adapter->vt_ctxt.is_rate_set) { + goto l_out; + } + + if (sxe_mbps_link_speed_get(adapter->link.speed) != SXE_LINK_MBPS_SPEED_DEFAULT) { + adapter->vt_ctxt.is_rate_set = false; + LOG_DEV_INFO("link speed has been changed. disable vf tx rate.\n"); + } + + spin_lock_irqsave(&adapter->vt_ctxt.vfs_lock, flags); + for (i = 0; i < adapter->vt_ctxt.num_vfs; i++) { + if (!adapter->vt_ctxt.is_rate_set) { + adapter->vt_ctxt.vf_info[i].tx_rate = 0; + } + + sxe_vf_rate_factor_set(adapter, i); + } + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); + +l_out: + return; +} + +s32 sxe_set_vf_rate(struct net_device *netdev, s32 vf_idx, + s32 min_rate, s32 max_rate) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_virtual_context *vt_ctxt = &adapter->vt_ctxt; + u32 mbps_speed = sxe_mbps_link_speed_get(adapter->link.speed); + s32 ret = -EINVAL; + + if (vf_idx >= vt_ctxt->num_vfs) { + LOG_ERROR_BDF("invalid vf_idx:%u exceed vf num:%u, " + "min_rate:%d max_rate:%d.(err:%d)\n", + vf_idx, vt_ctxt->num_vfs, + min_rate, max_rate, ret); + goto l_out; + } + + if (!adapter->link.is_up) { + LOG_ERROR_BDF("dev not link up, can't set vf:%u link " + "rate min:%d max:%d.(err:%d)\n", + vf_idx, min_rate, max_rate, ret); + goto l_out; + } + + if (mbps_speed != SXE_LINK_MBPS_SPEED_DEFAULT) { + LOG_ERROR_BDF("link speed:0x%x invalid," + "vf_idx:%u min_rate:%u max_rate:%u.(err:%d)\n", + mbps_speed, vf_idx, min_rate, max_rate, ret); + goto l_out; + } + + if (min_rate) { + LOG_ERROR_BDF("invalid min_rate:%u.(err:%d)\n", min_rate, ret); + goto l_out; + } + + if (max_rate && ((max_rate <= SXE_LINK_MBPS_SPEED_MIN) + || (max_rate > mbps_speed))) { + LOG_ERROR_BDF("invalid max_rate:%u.(err:%d)\n", max_rate, ret); + goto l_out; + } + + adapter->vt_ctxt.is_rate_set = true; + adapter->vt_ctxt.vf_info[vf_idx].tx_rate = max_rate; + + sxe_vf_rate_factor_set(adapter, vf_idx); + ret = 0; + + LOG_INFO_BDF("vf:%u tx min_rate:%u max_rate:%u link_speed:%u.\n", + vf_idx, min_rate, max_rate, mbps_speed); + +l_out: + return ret; +} + +s32 sxe_set_vf_vlan(struct net_device *netdev, s32 vf, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + s32 ret; + struct sxe_adapter *adapter = netdev_priv(netdev); + + LOG_INFO_BDF("netdev[%p] pf set vf_idx[%u], max_vf[%u], vlan[%u]," + "qos[%u], vlan_proto[%u]\n", + netdev, vf, adapter->vt_ctxt.num_vfs, vlan, qos, vlan_proto); + if ((vf >= adapter->vt_ctxt.num_vfs) || (vlan > SXE_MAX_VLAN_IDX) || (qos > SXE_MAX_QOS_IDX)) { + ret = -EINVAL; + goto l_out; + } + + if (vlan_proto != htons(ETH_P_8021Q)) { + ret = -EPROTONOSUPPORT; + goto l_out; + } + + if (vlan || qos) { + if (adapter->vt_ctxt.vf_info[vf].pf_vlan) { + LOG_INFO_BDF("vf[%u] pf_vlan[%u] exist, disable first\n", + vf, 
adapter->vt_ctxt.vf_info[vf].pf_vlan); + ret = sxe_port_vlan_disable(adapter, vf); + if (ret) { + goto l_out; + } + } + + ret = sxe_port_vlan_enable(adapter, vf, vlan, qos); + LOG_INFO_BDF("pf enable vf[%u], vlan[%u], qos[%u] ret = %d\n", + vf, vlan, qos, ret); + } else { + ret = sxe_port_vlan_disable(adapter, vf); + } + +l_out: + return ret; +} + +static void sxe_clear_vf_vlan(struct sxe_adapter *adapter, u32 vf) +{ + struct sxe_hw *hw = &adapter->hw; + u32 vlvfb_mask, pool_mask, i; + u32 bits[2], vlvfb, vid, vfta, vlvf, word, mask; + + LOG_DEBUG_BDF("clear vf[%u] vlan\n",vf); + + pool_mask = ~BIT(PF_POOL_INDEX(0) % VF_BLOCK_BITS); + vlvfb_mask = BIT(vf % VF_BLOCK_BITS); + + for (i = SXE_VLVF_ENTRIES; i--;) { + word = i * 2 + vf / VF_BLOCK_BITS; + vlvfb = hw->filter.vlan.ops->pool_filter_bitmap_read(hw, word); + + if (!(vlvfb & vlvfb_mask)) { + continue; + } + + vlvfb ^= vlvfb_mask; + + bits[word % 2] = vlvfb; + bits[~word % 2] = + hw->filter.vlan.ops->pool_filter_bitmap_read(hw, (word ^ 1)); + + if (bits[(PF_POOL_INDEX(0) / VF_BLOCK_BITS) ^ 1] || + (bits[PF_POOL_INDEX(0) / VF_BLOCK_BITS] & pool_mask)) { + goto update_vlvfb; + } + + if (bits[0] || bits[1]) { + goto update_vlvf; + } + + vlvf = hw->filter.vlan.ops->pool_filter_read(hw, i); + if (!vlvf) { + goto update_vlvfb; + } + + vid = vlvf & VLAN_VID_MASK; + mask = BIT(vid % 32); + + vfta = hw->filter.vlan.ops->filter_array_read(hw, vid / 32); + if (vfta & mask) { + hw->filter.vlan.ops-> + filter_array_write(hw, vid / 32, vfta ^ mask); + } + +update_vlvf: + hw->filter.vlan.ops->pool_filter_write(hw, i, 0); + + if (!(adapter->cap & SXE_VLAN_PROMISC)) { + vlvfb = 0; + } + +update_vlvfb: + hw->filter.vlan.ops->pool_filter_bitmap_write(hw, word, vlvfb); + } + + return; +} + +static s32 sxe_vf_mac_addr_set(struct sxe_adapter *adapter, + u8 vf_idx, u8 *mac_addr) +{ + s32 ret; + struct sxe_hw *hw = &adapter->hw; + u8 *vf_addr = adapter->vt_ctxt.vf_info[vf_idx].mac_addr; + + ret = sxe_uc_addr_del(hw, adapter->mac_filter_ctxt.uc_addr_table, + vf_addr, vf_idx); + if (ret) { + LOG_WARN_BDF("vf_idx:%d mac addr:%pM not in uc mac filter " + "or zero addr.\n", vf_idx, vf_addr); + } + ret = sxe_uc_addr_add(hw, adapter->mac_filter_ctxt.uc_addr_table, + mac_addr, vf_idx); + if (ret < 0) { + eth_zero_addr(vf_addr); + } else { + memcpy(vf_addr, mac_addr, ETH_ALEN); + } + + LOG_INFO_BDF("add vf_idx:%d mac addr:%pM to uc filter ret:%d.\n", + vf_idx, vf_addr, ret); + + return ret; +} + +s32 sxe_set_vf_mac(struct net_device *dev, s32 vf_idx, u8 *mac_addr) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + struct sxe_vf_info *vf_info; + s32 ret = 0; + + if (vf_idx >= adapter->vt_ctxt.num_vfs) { + ret = -EINVAL; + LOG_ERROR_BDF("set vf mac addr:%pM fail due to " + "vf_idx:%d exceed num_vfs:%d.(err:%d).\n", + mac_addr, vf_idx, adapter->vt_ctxt.num_vfs, ret); + goto l_out; + } + + vf_info = &adapter->vt_ctxt.vf_info[vf_idx]; + + if (is_valid_ether_addr(mac_addr)) { + LOG_DEV_INFO("setting mac address:%pM on vf:%u.\n", mac_addr, vf_idx); + LOG_DEV_INFO("reload vf driver to make mac addr effective.\n"); + + ret = sxe_vf_mac_addr_set(adapter, vf_idx, mac_addr); + if (ret < 0) { + LOG_DEV_WARN("vf:%d set mac addr:%pM fail " + "due to no space.(err:%d)\n", + vf_idx, mac_addr, ret); + } else { + vf_info->mac_from_pf = true; + if (test_bit(SXE_DOWN, &adapter->state)) { + LOG_DEV_WARN("vf:%u mac address has been set by pf, " + "but pf device is not up.\n", + vf_idx); + LOG_DEV_WARN("bring pf device up before attempting " + "to use the vf device.\n"); + } + } + } else 
if (is_zero_ether_addr(mac_addr)) { + if (is_zero_ether_addr(vf_info->mac_addr)) { + LOG_INFO_BDF("vf:%u mac addr is zero, skip dup set to zero.\n", + vf_idx); + goto l_out; + } + + LOG_DEV_INFO("delete vf:%u mac addr\n", vf_idx); + + ret = sxe_uc_addr_del(&adapter->hw, + adapter->mac_filter_ctxt.uc_addr_table, + vf_info->mac_addr, vf_idx); + if (ret < 0) { + LOG_DEV_WARN("vf:%u mac addr:%pM delete fail.\n", + vf_idx, vf_info->mac_addr); + } else { + vf_info->mac_from_pf = false; + memset(&vf_info->mac_addr, 0, ETH_ALEN); + } + } else { + ret = -EINVAL; + LOG_ERROR_BDF("mac addr:%pM set on vf:%u invalid.(err:%d)\n", + mac_addr, vf_idx, ret); + } + +l_out: + return ret; +} + +static s32 sxe_mbx_msg_send(struct sxe_adapter *adapter, u16 index, u32 *msg, u16 len) +{ + s32 ret; + struct sxe_hw *hw = &adapter->hw; + + ret = hw->mbx.ops->msg_send(hw, msg, len, index); + if (ret) { + LOG_ERROR_BDF("vf:%u send msg:0x%x len:%u fail.(err:%d)\n", + index, *msg, len, ret); + } + + return ret; +} + +void sxe_vf_trust_update_notify(struct sxe_adapter *adapter, u16 index) +{ + u32 msg = SXE_CTRL_MSG_REINIT; + unsigned long flags; + + spin_lock_irqsave(&adapter->vt_ctxt.vfs_lock, flags); + + sxe_mbx_msg_send(adapter, index, &msg, SXE_MSG_NUM(sizeof(msg))); + + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); + + LOG_WARN_BDF("pf send trust status change ctrl msg:0x%x to vf:%u\n", + msg, index); + + return; +} + +void sxe_netdev_down_notify_vf_all(struct sxe_adapter *adapter) +{ + struct sxe_virtual_context *vt_ctxt = &adapter->vt_ctxt; + u8 i; + u32 msg; + unsigned long flags; + + spin_lock_irqsave(&adapter->vt_ctxt.vfs_lock, flags); + for (i = 0; i < vt_ctxt->num_vfs; i++) { + msg = SXE_CTRL_MSG_NETDEV_DOWN; + sxe_mbx_msg_send(adapter, i, &msg, SXE_MSG_NUM(sizeof(msg))); + } + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); + + LOG_WARN_BDF("pf send netdev down ctrl msg:0x%x to all vf, num_vfs:%u\n", + msg, vt_ctxt->num_vfs); + + return; +} + +void sxe_link_update_notify_vf_all(struct sxe_adapter *adapter) +{ + struct sxe_virtual_context *vt_ctxt = &adapter->vt_ctxt; + u8 i; + u32 msg; + unsigned long flags; + + spin_lock_irqsave(&adapter->vt_ctxt.vfs_lock, flags); + for (i = 0; i < vt_ctxt->num_vfs; i++) { + if (vt_ctxt->vf_info[i].is_ready) { + msg = SXE_CTRL_MSG_LINK_UPDATE; + } else { + msg = SXE_CTRL_MSG_REINIT; + } + sxe_mbx_msg_send(adapter, i, &msg, SXE_MSG_NUM(sizeof(msg))); + } + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); + + LOG_WARN_BDF("pf send link update ctrl msg to all vf " + "num_vfs:%u.\n", + vt_ctxt->num_vfs); + + return; +} + +static void sxe_vf_vlan_rst(struct sxe_adapter *adapter, u8 vf_idx) +{ + struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf_idx]; + struct sxe_hw *hw = &adapter->hw; + u8 tc_num = sxe_dcb_tc_get(adapter); + + sxe_clear_vf_vlan(adapter, vf_idx); + + sxe_vf_vlan_configure(adapter, true, vf_info->pf_vlan, vf_idx); + + hw->filter.vlan.ops->untagged_pkts_rcv_switch(hw, vf_idx, !vf_info->pf_vlan); + + if (!vf_info->pf_vlan && !vf_info->pf_qos && !tc_num) { + hw->dma.ops->tx_vlan_tag_clear(hw, vf_idx); + } else { + if (vf_info->pf_qos || !tc_num) { + hw->dma.ops->tx_vlan_tag_set(hw, vf_info->pf_vlan, + vf_info->pf_qos, vf_idx); + } else { + hw->dma.ops->tx_vlan_tag_set(hw, vf_info->pf_vlan, + adapter->dcb_ctxt.default_up, vf_idx); + } + + if (vf_info->spoof_chk_enabled) { + hw->dma.ops->pool_mac_anti_spoof_set(hw, vf_idx, true); + hw->dma.ops->pool_vlan_anti_spoof_set(hw, vf_idx, true); + } + } + + return; +} + +void 
sxe_vf_hw_rst(struct sxe_adapter *adapter, u8 vf_idx) +{ + struct sxe_hw *hw = &adapter->hw; + struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf_idx]; + u16 pool_mask = sxe_pool_mask_get(adapter); + u8 ring_cnt = __ALIGN_MASK(1, ~pool_mask); + + sxe_vf_vlan_rst(adapter, vf_idx); + vf_info->mc_hash_used = 0; + +#ifdef SXE_IPSEC_CONFIGURE + sxe_vf_ipsec_entry_clear(adapter, vf_idx); +#endif + + __sxe_set_rx_mode(adapter->netdev, false); + + sxe_uc_addr_del(hw, adapter->mac_filter_ctxt.uc_addr_table, + vf_info->mac_addr, vf_idx); + + sxe_vf_uc_addr_sync(adapter, vf_idx, 0, NULL); + + + hw->dma.ops->vf_tx_ring_disable(hw, ring_cnt, vf_idx); + + LOG_INFO_BDF("vf_idx:%u vf flr done.\n", vf_idx); + + return; +} + +static void sxe_vf_rxtx_set(struct sxe_adapter *adapter, u8 vf_idx) +{ + u32 enable_pool; + u8 reg_idx = vf_idx / VF_BLOCK_BITS; + u8 bit_idx = vf_idx % VF_BLOCK_BITS; + struct sxe_hw *hw = &adapter->hw; + struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf_idx]; + + enable_pool = hw->dma.ops->tx_pool_get(hw, reg_idx); + if(vf_info->link_enable){ + enable_pool |= BIT(bit_idx); + } else { + enable_pool &= ~BIT(bit_idx); + } + hw->dma.ops->tx_pool_set(hw, reg_idx, enable_pool); + + enable_pool = hw->dma.ops->rx_pool_get(hw, reg_idx); + if ((adapter->netdev->mtu + ETH_HLEN > ETH_FRAME_LEN) || + !vf_info->link_enable) { + enable_pool &= ~BIT(bit_idx); + } else { + enable_pool |= BIT(bit_idx); + } + hw->dma.ops->rx_pool_set(hw, reg_idx, enable_pool); +} + +static void sxe_vf_rxtx_rst(struct sxe_adapter *adapter, u8 vf_idx) +{ + u16 mask = sxe_pool_mask_get(adapter); + u8 ring_per_pool = __ALIGN_MASK(1, ~mask); + u8 reg_idx = vf_idx / VF_BLOCK_BITS; + u8 bit_idx = vf_idx % VF_BLOCK_BITS; + struct sxe_hw *hw = &adapter->hw; + struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf_idx]; + + hw->dma.ops->pool_rx_ring_drop_enable(hw, vf_idx, + vf_info->pf_vlan, ring_per_pool); + + sxe_vf_rxtx_set(adapter, vf_idx); + + vf_info->is_ready = true; + + hw->dma.ops->spoof_count_enable(hw, reg_idx, bit_idx); + + hw->dma.ops->vf_tx_desc_addr_clear(hw, vf_idx, ring_per_pool); + + return; +} + +static void sxe_vf_rst_msg_reply(struct sxe_adapter *adapter, u8 vf_idx) +{ + struct sxe_rst_reply reply = {}; + struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf_idx]; + u8 *mac_addr = vf_info->mac_addr; + + reply.msg_type = SXE_VFREQ_RESET; + if (!is_zero_ether_addr(mac_addr) && + vf_info->mac_from_pf) { + reply.msg_type |= SXE_MSGTYPE_ACK; + memcpy((u8 *)reply.mac_addr, mac_addr, ETH_ALEN); + } else { + reply.msg_type |= SXE_MSGTYPE_NACK; + } + + reply.sw_mtu = sxe_sw_mtu_get(adapter); + + LOG_INFO_BDF("vf_idx:%d reset msg:0x%x handle done.mac addr:%pM mc type:%d " + "mac_from_pf:%d sw_mtu:%u\n", + vf_idx, reply.msg_type, + mac_addr, SXE_MC_FILTER_TYPE0, + vf_info->mac_from_pf, + reply.sw_mtu); + + reply.mc_filter_type = SXE_MC_FILTER_TYPE0; + + adapter->hw.mbx.ops->msg_send(&adapter->hw, (u32 *)&reply, + SXE_MSG_NUM(sizeof(reply)), vf_idx); + + return; +} + +static void sxe_vf_rst_msg_handle(struct sxe_adapter *adapter, u8 vf_idx) +{ + struct sxe_hw *hw = &adapter->hw; + u8 *mac_addr = adapter->vt_ctxt.vf_info[vf_idx].mac_addr; + + LOG_MSG_INFO(probe, "receive vf_idx:%d reset msg.\n", vf_idx); + + sxe_vf_hw_rst(adapter, vf_idx); + + hw->mbx.ops->mbx_mem_clear(hw, vf_idx); + + if (!is_zero_ether_addr(mac_addr)) { + sxe_vf_mac_addr_set(adapter, vf_idx, mac_addr); + } + + sxe_vf_rxtx_rst(adapter, vf_idx); + + sxe_vf_rst_msg_reply(adapter, vf_idx); + + return; +} + +static s32 
sxe_vf_mac_addr_set_handler(struct sxe_adapter *adapter, + u32 *msg, u8 vf_idx) +{ + struct sxe_uc_addr_msg mac_msg = *(struct sxe_uc_addr_msg *)msg; + struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf_idx]; + s32 ret; + + if (!is_valid_ether_addr(mac_msg.uc_addr)) { + ret = -SXE_ERR_PARAM; + LOG_MSG_WARN(drv, "vf_idx:%u invalid mac addr:%pM.(err:%d)\n", + vf_idx, mac_msg.uc_addr, ret); + goto l_out; + } + + if (vf_info->mac_from_pf && !vf_info->trusted && + !ether_addr_equal(vf_info->mac_addr, mac_msg.uc_addr)) { + ret = -SXE_ERR_PARAM; + LOG_MSG_WARN(drv, "vf_idx:%d mac addr:%pM attempt to " + "override admin mac addr:%pM.\n", + vf_idx, mac_msg.uc_addr, vf_info->mac_addr); + LOG_MSG_WARN(drv, "reload the VF driver to resume operations.\n"); + goto l_out; + } + + ret = sxe_vf_mac_addr_set(adapter, vf_idx, mac_msg.uc_addr); + if (ret < 0) { + LOG_INFO_BDF("vf_idx:%d set mac addr:%pM fail.(err:%d)\n", + vf_idx, mac_msg.uc_addr, ret); + goto l_out; + } + + LOG_INFO_BDF("vf:%d set mac addr:%pM to filter entry:%d done." + "mac_from_pf:%d trusted:%d vf[%d]->mac_addr:%pM.\n", + vf_idx, mac_msg.uc_addr, ret, + vf_info->mac_from_pf, vf_info->trusted, + vf_idx, vf_info->mac_addr); + + ret = 0; + +l_out: + return ret; + +} + +static s32 sxe_vf_mc_addr_sync(struct sxe_adapter *adapter, + u32 *msg, u8 vf_idx) +{ + struct sxe_mc_sync_msg *mc_msg = (struct sxe_mc_sync_msg *)msg; + u8 mc_cnt = min_t(u16, mc_msg->mc_cnt, SXE_VF_MC_ENTRY_NUM_MAX); + struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf_idx]; + struct sxe_mac_filter_context *mac_filter = &adapter->mac_filter_ctxt; + struct sxe_hw *hw = &adapter->hw; + u8 i; + + for (i = 0; i < SXE_MTA_ENTRY_NUM_MAX; i++) { + hw->filter.mac.ops->mta_hash_table_set(hw, i, + mac_filter->mc_hash_table[i]); + } + + vf_info->mc_hash_used = mc_cnt; + memset(vf_info->mc_hash, 0, sizeof(vf_info->mc_hash)); + + for (i = 0; i < mc_cnt; i++) { + vf_info->mc_hash[i] = mc_msg->mc_addr_extract[i]; + LOG_INFO_BDF("vf_idx:%u mc_cnt:%u mc_hash[%d]:0x%x\n", + vf_idx, mc_cnt, i, vf_info->mc_hash[i]); + } + + sxe_vf_mc_addr_restore(adapter); + + return 0; +} + +static void sxe_vf_uc_addr_del(struct sxe_adapter *adapter, + u8 vf_idx) +{ + struct sxe_vf_uc_addr_list *entry; + struct sxe_virtual_context *vf = &adapter->vt_ctxt; + s32 ret; + + list_for_each_entry(entry, &vf->head.list, list) { + if (entry->vf_idx == vf_idx) { + entry->vf_idx = -1; + entry->free = true; + entry->is_macvlan = false; + ret = sxe_uc_addr_del(&adapter->hw, + adapter->mac_filter_ctxt.uc_addr_table, + entry->uc_addr, vf_idx); + LOG_INFO_BDF("del vf:%d mac addr:%pM in mac list done(ret:%d).\n", + vf_idx, entry->uc_addr, ret); + } + } + + return; +} + +static s32 sxe_vf_uc_addr_sync(struct sxe_adapter *adapter, u8 vf_idx, + u16 index, u8 *mac_addr) +{ + struct sxe_vf_uc_addr_list *entry = NULL; + struct sxe_virtual_context *vf = &adapter->vt_ctxt; + s32 ret = 0; + + if (index <= 1) { + sxe_vf_uc_addr_del(adapter, vf_idx); + } + + if (index == 0) { + LOG_INFO_BDF("del vf_idx:%d all mac addr done.\n", vf_idx); + goto l_out; + } + + list_for_each_entry(entry, &vf->head.list, list) { + if (entry->free) { + break; + } + } + + if (!entry || !entry->free) { + ret = -SXE_ERR_NO_SPACE; + LOG_ERROR_BDF("vf_idx:%d has no space to sync %pM.(err:%d)\n", + vf_idx, mac_addr, ret); + goto l_out; + } + + ret = sxe_uc_addr_add(&adapter->hw, + adapter->mac_filter_ctxt.uc_addr_table, + mac_addr, vf_idx); + if (ret < 0) { + LOG_ERROR_BDF("vf_idx:%d list member index:%d add %pM fail.\n", + vf_idx, index, mac_addr);
+ goto l_out; + } + + entry->free = false; + entry->is_macvlan = true; + entry->vf_idx = vf_idx; + ether_addr_copy(entry->uc_addr, mac_addr); + +l_out: + return ret; +} + +STATIC s32 sxe_vf_uc_addr_sync_handler(struct sxe_adapter *adapter, + u32 *msg, u8 vf_idx) +{ + struct sxe_hw *hw = &adapter->hw; + struct sxe_uc_sync_msg *uc_msg = (struct sxe_uc_sync_msg *)msg; + struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf_idx]; + u16 index = uc_msg->index; + u8 *mac_addr = (u8 *)uc_msg->addr; + s32 ret; + + if (vf_info->mac_from_pf && !vf_info->trusted && index) { + ret = -SXE_ERR_OPRATION_NOT_PERM; + LOG_MSG_ERR(drv, "vf:%d has set mac addr by pf and vf not trusted, " + "deny add uc mac addr.(err:%d)\n", + vf_idx, ret); + goto l_out; + } + + if (index) { + if (!is_valid_ether_addr(mac_addr)) { + ret = -SXE_ERR_PARAM; + LOG_ERROR_BDF("index:%u vf:%d set invalid addr:%pM.(err:%d)\n", + index, vf_idx, mac_addr, ret); + goto l_out; + } + + if (adapter->vt_ctxt.vf_info[vf_idx].spoof_chk_enabled) { + hw->dma.ops->pool_mac_anti_spoof_set(hw, vf_idx, false); + hw->dma.ops->pool_vlan_anti_spoof_set(hw, vf_idx, false); + } + } + + ret = sxe_vf_uc_addr_sync(adapter, vf_idx, index, mac_addr); + if (ret < 0) { + LOG_MSG_ERR(drv, "msg_type:0x%x vf_idx:%d mac addr:%pM sync " + "msg handler err.(%d)\n", + uc_msg->msg_type, vf_idx, mac_addr, ret); + goto l_out; + } + + LOG_INFO_BDF("msg_type:0x%x vf_idx:%d index:%d mac addr:%pM ret:%d " + "uc mac msg handler done.\n", + uc_msg->msg_type, vf_idx, + index, mac_addr, ret); + +l_out: + return min_t(s32, ret, 0); +} + +STATIC s32 sxe_mbx_api_set_handler(struct sxe_adapter *adapter, + u32 *msg, u8 vf_idx) +{ + struct sxe_mbx_api_msg *api_msg = (struct sxe_mbx_api_msg *)msg; + s32 ret = -SXE_ERR_PARAM; + + switch (api_msg->api_version) { + case SXE_MBX_API_10: + case SXE_MBX_API_11: + case SXE_MBX_API_12: + case SXE_MBX_API_13: + case SXE_MBX_API_14: + adapter->vt_ctxt.vf_info[vf_idx].mbx_version = api_msg->api_version; + ret = 0; + LOG_INFO_BDF("mailbox api version:%u set success.\n", api_msg->api_version); + break; + default: + LOG_MSG_ERR(drv, "invalid mailbox api version:%u.\n", + api_msg->api_version); + break; + } + + return ret; +} + +static s32 sxe_pf_ring_info_get(struct sxe_adapter *adapter, u32 *msg, u8 vf_idx) +{ + struct sxe_ring_info_msg *ring_msg = (struct sxe_ring_info_msg *)msg; + u16 mask = sxe_pool_mask_get(adapter); + u8 default_tc = 0; + u8 num_tc = sxe_dcb_tc_get(adapter); + + ring_msg->max_rx_num = __ALIGN_MASK(1, ~mask); + ring_msg->max_tx_num = __ALIGN_MASK(1, ~mask); + + if (num_tc > 1) { + default_tc = netdev_get_prio_tc_map(adapter->netdev, + adapter->dcb_ctxt.default_up); + ring_msg->tc_num = num_tc; + } else if (adapter->vt_ctxt.vf_info->pf_vlan || adapter->vt_ctxt.vf_info->pf_qos) { + ring_msg->tc_num = 1; + } else { + ring_msg->tc_num = 0; + }; + + ring_msg->default_tc = default_tc; + + LOG_INFO_BDF("vf:%d get ring info tc_num:%d default_tc:%d " + "max_tx_num:%d max_rx_num:%d.\n", + vf_idx, ring_msg->tc_num, ring_msg->default_tc, + ring_msg->max_tx_num, ring_msg->max_rx_num); + + return 0; +} + +static s32 sxe_vf_rx_max_frame_set(struct sxe_adapter *adapter, + u32 *msgbuf, u8 vf_idx) +{ + struct sxe_hw *hw = &adapter->hw; + u32 max_frame = msgbuf[1] + ETH_HLEN + ETH_FCS_LEN; + + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + u32 reg_offset, vf_shift; + s32 ret = 0; + + if (pf_max_frame > ETH_FRAME_LEN) { + LOG_INFO_BDF("pf_max_frame=%u\n", pf_max_frame); + } else { + if (max_frame > 
(ETH_FRAME_LEN + ETH_FCS_LEN)) { + LOG_ERROR_BDF("frame oversize, pf_max_frame=%u, " + "vf:%u max_frame=%u\n", + pf_max_frame, vf_idx, max_frame); + ret = -EINVAL; + } + } + + vf_shift = vf_idx % VF_BLOCK_BITS; + reg_offset = vf_idx / VF_BLOCK_BITS; + + hw->dbu.ops->vf_rx_switch(hw, reg_offset, vf_shift, !!ret); + if (ret) { + LOG_MSG_ERR(drv, "vf:%u max_frame %d out of range\n", + vf_idx, max_frame); + goto l_end; + } + + if (max_frame > SXE_MAX_JUMBO_FRAME_SIZE) { + LOG_MSG_ERR(drv, "vf:%u max_frame %d out of range\n", vf_idx, max_frame); + ret = -EINVAL; + goto l_end; + } + + if ((pf_max_frame + ETH_FCS_LEN) < max_frame) { + ret = -EINVAL; + LOG_ERROR_BDF("vf:%u max_frame:%u exceed pf max_frame:%u not " + "permited.(err:%d)\n", + vf_idx, max_frame, + (pf_max_frame + ETH_FCS_LEN), ret); + goto l_end; + } + + LOG_MSG_INFO(hw, "vf:%u requests change max MTU to %d pf_mtu:%u.\n", + vf_idx, max_frame, + adapter->netdev->mtu); + +l_end: + return ret; +} + +static s32 sxe_vf_vlan_update_handler(struct sxe_adapter *adapter, + u32 *msgbuf, u8 vf) +{ + u32 add = (msgbuf[0] & SXE_VFREQ_MSGINFO_MASK) >> SXE_VFREQ_MSGINFO_SHIFT; + u32 vid = (msgbuf[1] & SXE_VLVF_VLANID_MASK); + u8 tcs = sxe_dcb_tc_get(adapter); + s32 ret; + + if (adapter->vt_ctxt.vf_info[vf].pf_vlan || tcs) { + LOG_MSG_WARN(drv, + "vf %d attempted to override administratively set VLAN configuration\n" + "reload the vf driver to resume operations\n", + vf); + ret = -1; + goto l_ret; + } + + if (!vid && !add) { + LOG_WARN_BDF("do not allow remove vlan 0\n"); + ret = 0; + goto l_ret; + } + + ret = sxe_vf_vlan_configure(adapter, add, vid, vf); + LOG_INFO_BDF("vf[%u] %s vid[%u] finished, and ret = %d\n", + vf, add ? "add" : "delete", vid, ret); + +l_ret: + return ret; +} + +static s32 sxe_vf_cast_mode_handler(struct sxe_adapter *adapter, + u32 *msgbuf, u8 vf_idx) +{ + s32 ret = 0; + struct sxe_cast_mode_msg cast_msg = *(struct sxe_cast_mode_msg *)msgbuf; + struct sxe_vf_info *vf_info = &adapter->vt_ctxt.vf_info[vf_idx]; + struct sxe_hw *hw = &adapter->hw; + u32 mode = cast_msg.cast_mode; + u32 disable; + u32 enable; + u32 value; + + if ((cast_msg.cast_mode > SXE_CAST_MODE_MULTI) + && !vf_info->trusted) { + mode = SXE_CAST_MODE_MULTI; + } + + if (vf_info->cast_mode == mode) { + LOG_INFO_BDF("vf:%d msg.cast_mode:0x%x mode:0x%x trust:%d\n", + vf_idx, cast_msg.cast_mode, mode, vf_info->trusted); + goto l_out; + } + + switch (mode) { + case SXE_CAST_MODE_NONE: + disable = SXE_VMOLR_BAM | SXE_VMOLR_ROMPE | SXE_VMOLR_MPE; + enable = 0; + break; + + case SXE_CAST_MODE_MULTI: + disable = SXE_VMOLR_MPE; + enable = SXE_VMOLR_BAM | SXE_VMOLR_ROMPE; + break; + + case SXE_CAST_MODE_ALLMULTI: + disable = 0; + enable = SXE_VMOLR_BAM | SXE_VMOLR_ROMPE | + SXE_VMOLR_MPE; + break; + + case SXE_CAST_MODE_PROMISC: + ret = -EOPNOTSUPP; + LOG_ERROR_BDF("vf:%d promisc mode not support.(ret:%d)\n", + vf_idx, ret); + goto l_out; + + default: + ret = -SXE_ERR_PARAM; + LOG_ERROR_BDF("vf:%u invalid cast mode:0x%x.\n", vf_idx, mode); + goto l_out; + } + + value = hw->filter.mac.ops->pool_rx_mode_get(hw, vf_idx); + value &= ~disable; + value |= enable; + hw->filter.mac.ops->pool_rx_mode_set(hw, value, vf_idx); + + LOG_INFO_BDF("vf:%d filter reg:0x%x mode:%d.\n", vf_idx, value, mode); + + vf_info->cast_mode = mode; + +l_out: + return ret; +} + +static s32 sxe_vf_link_enable_get(struct sxe_adapter *adapter, + u32 *msgbuf, u8 vf_idx) +{ + s32 ret = 0; + struct sxe_link_enable_msg *msg = (struct sxe_link_enable_msg *)msgbuf; + + 
switch(adapter->vt_ctxt.vf_info[vf_idx].mbx_version) { + case SXE_MBX_API_12: + case SXE_MBX_API_13: + case SXE_MBX_API_14: + msg->link_enable = adapter->vt_ctxt.vf_info[vf_idx].link_enable; + break; + default: + ret = -EOPNOTSUPP; + } + LOG_INFO_BDF("ret: %d, vf: %d, link_enable: %d\n", ret, vf_idx, msg->link_enable); + + return ret; +} + +static s32 sxe_pf_rss_redir_tbl_get(struct sxe_adapter *adapter, + u32 *msgbuf, u8 vf_idx) +{ + s32 ret = 0; + u32 i, j; + const u8 *reta = adapter->rss_indir_tbl; + u32 reta_size = sxe_rss_redir_tbl_size_get(); + struct sxe_redir_tbl_msg *msg = (struct sxe_redir_tbl_msg *)msgbuf; + + if (!adapter->vt_ctxt.vf_info[vf_idx].rss_query_enabled) { + LOG_WARN_BDF("vf[%u] rss disable\n", vf_idx); + ret = -EPERM; + goto l_end; + } + + for (i = 0; i < reta_size / 16; i++) { + msg->entries[i] = 0; + for (j = 0; j < 16; j++) + msg->entries[i] |= + (u32)(reta[16 * i + j] & 0x3) << (2 * j); + } + +l_end: + return ret; +} + +static s32 sxe_pf_rss_key_get(struct sxe_adapter *adapter, + u32 *msgbuf, u8 vf_idx) +{ + s32 ret = 0; + struct sxe_rss_hsah_key_msg *msg = (struct sxe_rss_hsah_key_msg *)msgbuf; + + if (!adapter->vt_ctxt.vf_info[vf_idx].rss_query_enabled) { + LOG_WARN_BDF("vf[%u] rss disable\n", vf_idx); + ret = -EPERM; + goto l_end; + } + + memcpy(msg->hash_key, adapter->rss_key, SXE_RSS_KEY_SIZE); + +l_end: + return ret; +} + +static s32 sxe_pf_rss_conf_get(struct sxe_adapter *adapter, + u32 *msgbuf, u8 vf_idx) +{ + struct sxe_rss_hash_msg *msg = (struct sxe_rss_hash_msg *)msgbuf; + u8 rss_key[SXE_RSS_KEY_SIZE] = {0}; + struct sxe_rss_hash_config rss_conf; + + rss_conf.rss_key = rss_key; + sxe_rss_hash_conf_get(adapter, &rss_conf); + + memcpy(msg->hash_key, rss_conf.rss_key, SXE_RSS_KEY_SIZE); + msg->rss_hf = rss_conf.rss_hf; + + return 0; +} + +STATIC struct sxe_msg_table msg_table[] = { + [SXE_VFREQ_MAC_ADDR_SET] = {SXE_VFREQ_MAC_ADDR_SET, sxe_vf_mac_addr_set_handler}, + [SXE_VFREQ_MC_ADDR_SYNC] = {SXE_VFREQ_MC_ADDR_SYNC, sxe_vf_mc_addr_sync}, + [SXE_VFREQ_VLAN_SET] = {SXE_VFREQ_VLAN_SET, sxe_vf_vlan_update_handler}, + [SXE_VFREQ_LPE_SET] = {SXE_VFREQ_LPE_SET, sxe_vf_rx_max_frame_set}, + [SXE_VFREQ_UC_ADDR_SYNC] = {SXE_VFREQ_UC_ADDR_SYNC, sxe_vf_uc_addr_sync_handler}, + [SXE_VFREQ_API_NEGOTIATE] = {SXE_VFREQ_API_NEGOTIATE, sxe_mbx_api_set_handler}, + [SXE_VFREQ_RING_INFO_GET] = {SXE_VFREQ_RING_INFO_GET, sxe_pf_ring_info_get}, + [SXE_VFREQ_REDIR_TBL_GET] = {SXE_VFREQ_REDIR_TBL_GET, sxe_pf_rss_redir_tbl_get}, + [SXE_VFREQ_RSS_KEY_GET] = {SXE_VFREQ_RSS_KEY_GET, sxe_pf_rss_key_get}, + [SXE_VFREQ_CAST_MODE_SET] = {SXE_VFREQ_CAST_MODE_SET, sxe_vf_cast_mode_handler}, + [SXE_VFREQ_LINK_ENABLE_GET] = {SXE_VFREQ_LINK_ENABLE_GET, sxe_vf_link_enable_get}, + +#ifdef SXE_IPSEC_CONFIGURE + [SXE_VFREQ_IPSEC_ADD] = {SXE_VFREQ_IPSEC_ADD, sxe_vf_ipsec_add}, + [SXE_VFREQ_IPSEC_DEL] = {SXE_VFREQ_IPSEC_DEL, sxe_vf_ipsec_del}, +#endif + + [SXE_VFREQ_RSS_CONF_GET] = {SXE_VFREQ_RSS_CONF_GET, sxe_pf_rss_conf_get}, +}; + +STATIC s32 sxe_req_msg_handle(struct sxe_adapter *adapter, u32 *msg, + u8 vf_idx) +{ + struct sxe_hw *hw = &adapter->hw; + s32 ret; + u16 cmd_id = msg[0] & SXE_VFREQ_MASK; + + hw->setup.ops->regs_flush(hw); + +#ifdef SXE_IPSEC_CONFIGURE + if (cmd_id > SXE_VFREQ_IPSEC_DEL) { +#else + if (cmd_id > SXE_VFREQ_LINK_ENABLE_GET && + cmd_id <= SXE_VFREQ_IPSEC_DEL) { +#endif + ret = -SXE_ERR_PARAM; + LOG_ERROR_BDF("vf_idx:%u msg:0x%x invalid cmd_id:0x%x.\n", + vf_idx, msg[0], cmd_id); + goto l_out; + } + + if (cmd_id == SXE_VFREQ_RESET) { + ret = 0; + 
sxe_vf_rst_msg_handle(adapter, vf_idx); + goto l_out; + } + + if (!adapter->vt_ctxt.vf_info[vf_idx].is_ready) { + msg[0] |= SXE_MSGTYPE_NACK; + ret = hw->mbx.ops->msg_send(hw, msg, + SXE_MSG_NUM(sizeof(msg[0])), vf_idx); + LOG_WARN_BDF("vf_idx:%d is_ready:0 send nack to vf.ret:%d.\n", + vf_idx, ret); + goto l_out; + } + + if (msg_table[cmd_id].msg_func) { + ret = msg_table[cmd_id].msg_func(adapter, msg, vf_idx); + LOG_INFO_BDF("msg:0x%x cmd_id:0x%x handle done.ret:%d\n", + msg[0], cmd_id, ret); + } else { + ret = -SXE_ERR_PARAM; + LOG_ERROR_BDF("msg_type:0x%x cmdId:0x%x invalid.(err:%d)\n", + msg[0], cmd_id, ret); + } + if (!ret) { + msg[0] |= SXE_MSGTYPE_ACK; + } else { + msg[0] |= SXE_MSGTYPE_NACK; + LOG_INFO_BDF("vf:%d msg:0x%x no handler or handle fail.(ret:%d)\n", + vf_idx, msg[0], ret); + } + + ret = hw->mbx.ops->msg_send(hw, msg, SXE_MBX_MSG_NUM, vf_idx); + if (ret) { + LOG_ERROR_BDF("vf:%d msg:0x%x reply fail.(err:%d).\n", + vf_idx, msg[0], ret); + } + + LOG_INFO_BDF("pf reply vf:%d msg:0x%x done.ret:%d\n", vf_idx, msg[0], ret); + +l_out: + return ret; +} + +s32 sxe_vf_req_task_handle(struct sxe_adapter *adapter, u8 vf_idx) +{ + u32 msg[SXE_MBX_MSG_NUM] = {0}; + s32 ret; + + ret = adapter->hw.mbx.ops->msg_rcv(&adapter->hw, msg, SXE_MBX_MSG_NUM, vf_idx); + if (ret) { + LOG_DEV_ERR("vf_idx:%d rcv vf req msg:0x%x fail.(err:%d)\n", + vf_idx, msg[0], ret); + goto l_out; + } + + LOG_INFO_BDF("rcv vf_idx:%d req msg:0x%x.\n", vf_idx, msg[0]); + + if (msg[0] & (SXE_MSGTYPE_ACK | SXE_MSGTYPE_NACK)) { + LOG_WARN_BDF("msg:0x%x has handled, no need dup handle.\n", + msg[0]); + goto l_out; + } + + ret = sxe_req_msg_handle(adapter, msg, vf_idx); + if (ret) { + LOG_ERROR_BDF("vf:%d request msg handle fail.(err:%d)\n", + vf_idx, ret); + } + +l_out: + return ret; +} + +void sxe_vf_ack_task_handle(struct sxe_adapter *adapter, u8 vf_idx) +{ + u32 msg = SXE_MSGTYPE_NACK; + + if (!adapter->vt_ctxt.vf_info[vf_idx].is_ready) { + adapter->hw.mbx.ops->msg_send(&adapter->hw, &msg, + SXE_MSG_NUM(sizeof(msg)), vf_idx); + } + + return; +} + +s32 sxe_set_vf_spoofchk(struct net_device *dev, s32 vf_idx, bool status) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + struct sxe_hw *hw = &adapter->hw; + s32 ret = 0; + + if (vf_idx >= adapter->vt_ctxt.num_vfs) { + ret = -EINVAL; + LOG_ERROR_BDF("vf_idx:%d exceed vf nums:%d.(err:%d)\n", + vf_idx, adapter->vt_ctxt.num_vfs, ret); + goto l_end; + } + + adapter->vt_ctxt.vf_info[vf_idx].spoof_chk_enabled = status; + + hw->dma.ops->pool_mac_anti_spoof_set(hw, vf_idx, status); + hw->dma.ops->pool_vlan_anti_spoof_set(hw, vf_idx, status); + + LOG_INFO_BDF("vf:%u spoof check:%s.\n", + vf_idx, status ? "on" : "off"); +l_end: + return ret; +} + +s32 sxe_set_vf_trust(struct net_device *dev, + s32 vf_idx, bool status) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + struct sxe_vf_info *vf_info; + s32 ret = 0; + + if (vf_idx >= adapter->vt_ctxt.num_vfs) { + ret = -EINVAL; + LOG_ERROR_BDF("vf_idx:%d exceed vf nums:%d.(err:%d)\n", + vf_idx, adapter->vt_ctxt.num_vfs, ret); + goto l_out; + } + + vf_info = &adapter->vt_ctxt.vf_info[vf_idx]; + if (vf_info->trusted == status) { + LOG_INFO_BDF("current vf:%d trust status:%d, skip dup set.\n", + vf_idx, status); + goto l_out; + } + + vf_info->trusted = status; + vf_info->is_ready = false; + + sxe_vf_trust_update_notify(adapter, vf_idx); + + LOG_MSG_INFO(drv, "vf:%u trust:%s.\n", + vf_idx, status ? 
"on" : "off"); + +l_out: + return ret; +} + +s32 sxe_set_vf_rss_query_en(struct net_device *dev, s32 vf_idx, bool status) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + s32 ret = 0; + + if (vf_idx >= adapter->vt_ctxt.num_vfs) { + ret = -EINVAL; + LOG_ERROR_BDF("vf_idx:%d exceed vf nums:%d.(err:%d)\n", + vf_idx, adapter->vt_ctxt.num_vfs, ret); + goto l_end; + } + + adapter->vt_ctxt.vf_info[vf_idx].rss_query_enabled = status; + + LOG_INFO_BDF("vf:%u query_rss:%s.\n", + vf_idx, status ? "on" : "off"); + +l_end: + return ret; +} + +s32 sxe_get_vf_config(struct net_device *dev, s32 vf_idx, + struct ifla_vf_info *info) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + struct sxe_vf_info *vf_info; + s32 ret = 0; + + if (vf_idx >= adapter->vt_ctxt.num_vfs) { + ret = -EINVAL; + LOG_ERROR_BDF("vf_idx:%d exceed vf nums:%d.(err:%d)\n", + vf_idx, adapter->vt_ctxt.num_vfs, ret); + goto l_end; + } + + vf_info = &adapter->vt_ctxt.vf_info[vf_idx]; + + ether_addr_copy(info->mac, vf_info->mac_addr); + info->vf = vf_idx; + info->min_tx_rate = 0; + info->max_tx_rate = vf_info->tx_rate; + info->vlan = vf_info->pf_vlan; + info->qos = vf_info->pf_qos; + info->spoofchk = vf_info->spoof_chk_enabled; + info->rss_query_en = vf_info->rss_query_enabled; + info->trusted = vf_info->trusted; +#ifdef HAVE_NDO_SET_VF_LINK_STATE + info->linkstate = vf_info->link_state; +#endif + + LOG_INFO_BDF("vf_idx:%d get config info pf_vlan:%d pf_qos:%d spoof_chk:%d " + "rss_query_en:%d trusted:%d.\n", + vf_idx, vf_info->pf_vlan, vf_info->pf_qos, + vf_info->spoof_chk_enabled, + vf_info->rss_query_enabled, vf_info->trusted); + +l_end: + return ret; +} + +#ifdef HAVE_NDO_SET_VF_LINK_STATE +void sxe_set_vf_link_enable(struct sxe_adapter *adapter, s32 vf_idx, s32 state) +{ + u32 msg; + unsigned long flags; + struct sxe_vf_info *vfinfo = &(adapter->vt_ctxt.vf_info[vf_idx]); + vfinfo->link_state = state; + + switch (state) { + case IFLA_VF_LINK_STATE_AUTO: + if (test_bit(SXE_DOWN, &adapter->state)) + vfinfo->link_enable = false; + else + vfinfo->link_enable = true; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vfinfo->link_enable = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vfinfo->link_enable = false; + break; + } + + sxe_vf_rxtx_set(adapter, vf_idx); + + vfinfo->is_ready = false; + + spin_lock_irqsave(&adapter->vt_ctxt.vfs_lock, flags); + msg = SXE_CTRL_MSG_REINIT; + sxe_mbx_msg_send(adapter, vf_idx, &msg, SXE_MSG_NUM(sizeof(msg))); + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); +} + +s32 sxe_set_vf_link_state(struct net_device *netdev, s32 vf_idx, s32 state) +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + s32 ret = 0; + + if (vf_idx < 0 || vf_idx >= adapter->vt_ctxt.num_vfs) { + LOG_DEV_ERR("invalid vf idx: %d\n", vf_idx); + ret = -EINVAL; + goto out; + } + + switch (state) { + case IFLA_VF_LINK_STATE_ENABLE: + LOG_DEV_INFO("set VF %d link state %d - not supported\n", + vf_idx, state); + break; + case IFLA_VF_LINK_STATE_DISABLE: + LOG_DEV_INFO("set VF %d link state disable\n", vf_idx); + sxe_set_vf_link_enable(adapter, vf_idx, state); + break; + case IFLA_VF_LINK_STATE_AUTO: + LOG_DEV_INFO("set VF %d link state auto\n", vf_idx); + sxe_set_vf_link_enable(adapter, vf_idx, state); + break; + default: + LOG_DEV_ERR("set VF %d - invalid link state %d\n", vf_idx, state); + ret = -EINVAL; + } +out: + return ret; +} + +void sxe_vf_enable_and_reinit_notify_vf_all(struct sxe_adapter *adapter) +{ + u32 i; + struct sxe_vf_info *vfinfo = adapter->vt_ctxt.vf_info; + + for(i = 0; i < adapter->vt_ctxt.num_vfs; 
i++) { + sxe_set_vf_link_enable(adapter, i, vfinfo[i].link_state); + } +} +#endif + +static void sxe_pcie_vt_mode_set(struct sxe_adapter *adapter) +{ + u16 pool_mask = sxe_pool_mask_get(adapter); + struct sxe_hw *hw = &adapter->hw; + u32 value; + + if (pool_mask == SXE_8Q_PER_POOL_MASK) { + value = SXE_GCR_EXT_VT_MODE_16; + } else if (pool_mask == SXE_4Q_PER_POOL_MASK) { + value = SXE_GCR_EXT_VT_MODE_32; + } else { + value = SXE_GCR_EXT_VT_MODE_64; + } + + hw->pcie.ops->vt_mode_set(hw, value); + + return; +} + +void sxe_vt1_configure(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + u16 pf_pool_num = adapter->pool_f.pf_num_used; + u8 pf_pool_idx = PF_POOL_INDEX(0); + u8 vf_reg_index = pf_pool_idx / VF_BLOCK_BITS; + u8 vf_bit_index = pf_pool_idx % VF_BLOCK_BITS; + u8 i; + unsigned long flags; + + if (!(adapter->cap & SXE_SRIOV_ENABLE)) { + LOG_INFO_BDF("cap:0x%x sriov disabled no need configure vt.\n", + adapter->cap); + goto l_end; + } + + hw->filter.mac.ops->vt_ctrl_configure(hw, pf_pool_idx); + + while (pf_pool_num--) { + hw->filter.mac.ops->pool_rx_mode_set(hw, SXE_VMOLR_AUPE, + PF_POOL_INDEX(pf_pool_num)); + } + + hw->dma.ops->rx_pool_set(hw, vf_reg_index, GENMASK(31, vf_bit_index)); + hw->dma.ops->rx_pool_set(hw, (vf_reg_index ^ 1), (vf_reg_index - 1)); + + hw->dma.ops->tx_pool_set(hw, vf_reg_index, GENMASK(31, vf_bit_index)); + hw->dma.ops->tx_pool_set(hw, (vf_reg_index ^ 1), (vf_reg_index - 1)); + + if (adapter->vt_ctxt.bridge_mode == BRIDGE_MODE_VEB) { + hw->dma.ops->vt_pool_loopback_switch(hw, true); + } + + hw->filter.mac.ops->uc_addr_pool_enable(hw, 0, pf_pool_idx); + + sxe_pcie_vt_mode_set(adapter); + + adapter->cap &= ~SXE_VLAN_PROMISC; + + spin_lock_irqsave(&adapter->vt_ctxt.vfs_lock, flags); + for (i = 0; i < adapter->vt_ctxt.num_vfs; i++) { + sxe_set_vf_spoofchk(adapter->netdev, i, + adapter->vt_ctxt.vf_info[i].spoof_chk_enabled); + sxe_set_vf_rss_query_en(adapter->netdev, i, + adapter->vt_ctxt.vf_info[i].rss_query_enabled); + } + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); + +l_end: + return; +} + +bool sxe_vf_tx_pending(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + u8 pool_mask = sxe_pool_mask_get(adapter); + u8 ring_per_pool = __ALIGN_MASK(1, ~pool_mask); + u8 vf_idx; + u8 ring_idx; + u32 head; + u32 tail; + bool ret = false; + unsigned long flags; + + spin_lock_irqsave(&adapter->vt_ctxt.vfs_lock, flags); + if (!adapter->vt_ctxt.num_vfs) { + goto l_out; + } + + for (vf_idx = 0; vf_idx < adapter->vt_ctxt.num_vfs; vf_idx++) { + for (ring_idx = 0; ring_idx < ring_per_pool; ring_idx++) { + hw->dma.ops->tx_ring_info_get(hw, + vf_idx * ring_per_pool + ring_idx, &head, &tail); + + if (head != tail) { + LOG_DEV_INFO("vf:%u ring_per_pool:%u " + "ring_idx:%u head:%u tail:%u " + "has pending data.\n", + vf_idx, ring_per_pool, + ring_idx, head, tail); + ret = true; + goto l_out; + } + } + } + +l_out: + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); + return ret; +} + +void sxe_vf_disable(struct sxe_adapter *adapter) +{ + u16 rss; + +#ifdef HAVE_MACVLAN_OFFLOAD_SUPPORT + struct sxe_pool_feature *pool = &adapter->pool_f; + if (bitmap_weight(adapter->vt_ctxt.pf_pool_bitmap, pool->pf_num_used) == 1) { + rss = min_t(u16, SXE_RSS_RING_NUM_MAX, num_online_cpus()); + adapter->cap &= ~SXE_SRIOV_ENABLE; + adapter->cap &= ~SXE_MACVLAN_ENABLE; + } else { + rss = min_t(u16, MIN_QUEUES_IN_SRIOV, num_online_cpus()); + } +#else + rss = min_t(u16, SXE_RSS_RING_NUM_MAX, num_online_cpus()); + adapter->cap &= ~SXE_SRIOV_ENABLE; 
+#endif + adapter->pool_f.vf_num_used = 0; + adapter->ring_f.rss_limit = rss; + + adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs = SXE_DCB_8_TC; + adapter->dcb_ctxt.cee_cfg.num_tcs.pfc_tcs = SXE_DCB_8_TC; + + LOG_INFO_BDF("vf disable update rss_limit to %u cap:0x%x.\n", + adapter->ring_f.rss_limit, adapter->cap); + msleep(SXE_VF_DISABLE_WAIT); + + return; +} + +#ifdef CONFIG_PCI_IOV + +void sxe_vf_resource_release(struct sxe_adapter *adapter) +{ + u8 vf_idx = 0; + u8 vf_num = adapter->vt_ctxt.num_vfs; + unsigned long flags; + + + spin_lock_irqsave(&adapter->vt_ctxt.vfs_lock, flags); + adapter->vt_ctxt.num_vfs = 0; + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); + + smp_wmb(); + + if (vf_num) { + struct sxe_vf_info *vf_info = adapter->vt_ctxt.vf_info; + struct pci_dev *vf_dev = vf_info->vf_dev; + + for (vf_idx = 0; vf_idx < vf_num; vf_idx++) { + vf_dev = vf_info[vf_idx].vf_dev; + if (!vf_dev) { + LOG_WARN_BDF("vf:%u vf pci dev null.\n", vf_idx); + continue; + } + + vf_info[vf_idx].vf_dev = NULL; + pci_dev_put(vf_dev); + } + + SXE_KFREE(vf_info); + SXE_KFREE(adapter->vt_ctxt.vf_uc_list); + } + + LOG_INFO_BDF("%u vf resource released.\n", vf_num); + + return; +} + +void sxe_vf_exit(struct sxe_adapter *adapter) +{ + rtnl_lock(); + sxe_vf_resource_release(adapter); + sxe_vf_disable(adapter); + rtnl_unlock(); + + pci_disable_sriov(adapter->pdev); + + return; +} + +static inline void sxe_vf_mac_addr_init(struct sxe_adapter *adapter, + u8 num_vfs) +{ + u8 i; + + for (i = 0; i < num_vfs; i++) { + eth_zero_addr(adapter->vt_ctxt.vf_info[i].mac_addr); + } + + return; +} + +static void sxe_vf_uc_addr_list_init(struct sxe_adapter *adapter, u8 num_vfs) +{ + u8 vf_uc_num = SXE_UC_ENTRY_NUM_MAX - (SXE_MAX_MACVLANS + 1 + num_vfs); + struct sxe_vf_uc_addr_list *mac_list; + struct sxe_virtual_context *vf = &adapter->vt_ctxt; + u8 i; + + if (!vf_uc_num) { + LOG_WARN_BDF("num_vfs:%d has no available rar.\n", num_vfs); + goto l_end; + } + + mac_list = kcalloc(vf_uc_num, sizeof(struct sxe_vf_uc_addr_list), GFP_KERNEL); + if (mac_list) { + INIT_LIST_HEAD(&vf->head.list); + for (i = 0; i < vf_uc_num; i++) { + mac_list[i].vf_idx = -1; + mac_list[i].free = true; + list_add(&mac_list[i].list, &(vf->head.list)); + } + + vf->vf_uc_list = mac_list; + + LOG_INFO_BDF("vf uc mac addr list mem cnt:%u num_vfs:%u.\n", + vf_uc_num, num_vfs); + } + +l_end: + return; +} + +static s32 sxe_vf_info_init(struct sxe_adapter *adapter, u8 num_vfs) +{ + u8 i; + s32 ret; + + adapter->vt_ctxt.vf_info = kcalloc(num_vfs, sizeof(struct sxe_vf_info), + GFP_KERNEL); + if (!adapter->vt_ctxt.vf_info) { + ret = -ENOMEM; + LOG_ERROR_BDF("num_vfs:%d alloc size:%zuB fail.(err:%d)\n", + num_vfs, num_vfs * sizeof(struct sxe_vf_info), ret); + goto l_out; + } + + for (i = 0; i < num_vfs; i++) { + adapter->vt_ctxt.vf_info[i].trusted = false; + adapter->vt_ctxt.vf_info[i].cast_mode = SXE_CAST_MODE_NONE; + adapter->vt_ctxt.vf_info[i].spoof_chk_enabled = true; + adapter->vt_ctxt.vf_info[i].link_enable = true; + adapter->vt_ctxt.vf_info[i].rss_query_enabled = false; + } + + ret = 0; + + LOG_INFO_BDF("num_vfs:%u vf_info:0x%pK.\n", num_vfs, adapter->vt_ctxt.vf_info); + +l_out: + return ret; +} + +static s32 sxe_vf_init(struct sxe_adapter *adapter, u8 num_vfs) +{ + struct sxe_hw *hw = &adapter->hw; + s32 ret = 0; + + ret = sxe_vf_info_init(adapter, num_vfs); + if (ret) { + LOG_ERROR_BDF("num_vfs:%u vf info alloc memory fail.(err:%d)\n", + num_vfs, ret); + goto l_out; + } + + adapter->vt_ctxt.bridge_mode = BRIDGE_MODE_VEB; +
hw->dma.ops->vt_pool_loopback_switch(hw, true); + + if (num_vfs < SXE_VF_NUM_16) { + adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs = SXE_DCB_8_TC; + adapter->dcb_ctxt.cee_cfg.num_tcs.pfc_tcs = SXE_DCB_8_TC; + } else if (num_vfs < SXE_VF_NUM_32) { + adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs = SXE_DCB_4_TC; + adapter->dcb_ctxt.cee_cfg.num_tcs.pfc_tcs = SXE_DCB_4_TC; + } else { + adapter->dcb_ctxt.cee_cfg.num_tcs.pg_tcs = SXE_DCB_1_TC; + adapter->dcb_ctxt.cee_cfg.num_tcs.pfc_tcs = SXE_DCB_1_TC; + } + + sxe_vf_uc_addr_list_init(adapter, num_vfs); + + sxe_vf_mac_addr_init(adapter, num_vfs); + + adapter->vt_ctxt.num_vfs = num_vfs; + adapter->pool_f.vf_num_used = num_vfs; + +#ifdef HAVE_MACVLAN_OFFLOAD_SUPPORT + adapter->cap |= SXE_SRIOV_ENABLE | + SXE_MACVLAN_ENABLE; +#else + adapter->cap |= SXE_SRIOV_ENABLE; +#endif + + adapter->cap &= ~(SXE_LRO_CAPABLE | + SXE_LRO_ENABLE); + + LOG_MSG_INFO(probe, "iov is enabled with %d vfs\n", num_vfs); + +l_out: + return ret; +} + +void sxe_vf_down(struct sxe_adapter *adapter) +{ + struct sxe_virtual_context *vt_ctxt = &adapter->vt_ctxt; + struct sxe_hw *hw = &adapter->hw; + u8 i; + unsigned long flags; + + spin_lock_irqsave(&adapter->vt_ctxt.vfs_lock, flags); + if (vt_ctxt->num_vfs == 0) { + LOG_INFO_BDF("vf num:%d no need down.\n", vt_ctxt->num_vfs); + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); + goto l_out; + } + + hw->irq.ops->set_eitrsel(hw, 0); + for (i = 0; i < vt_ctxt->num_vfs; i++) { + vt_ctxt->vf_info[i].is_ready = false; + } + + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); + + sxe_netdev_down_notify_vf_all(adapter); + + hw->dma.ops->tx_pool_set(hw, 0, 0); + hw->dma.ops->tx_pool_set(hw, 1, 0); + hw->dma.ops->rx_pool_set(hw, 0, 0); + hw->dma.ops->rx_pool_set(hw, 1, 0); + +l_out: + return; +} + +void sxe_sriov_init(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + + hw->mbx.ops->init(hw); + + spin_lock_init(&adapter->vt_ctxt.vfs_lock); + + pci_sriov_set_totalvfs(adapter->pdev, SXE_VF_DRV_MAX); + + if (max_vfs > 0) { + LOG_DEV_WARN("max_vfs module parameter is deprecated - " + "please use the pci sysfs interface instead.\n"); + if (max_vfs > SXE_VF_DRV_MAX) { + LOG_DEV_WARN("max_vfs parameter invalid:%u, not assigning vfs\n", + max_vfs); + max_vfs = 0; + } + } + + sxe_param_sriov_enable(adapter, max_vfs); + + return; +} + +static void sxe_vf_dev_info_get(struct sxe_adapter *adapter) +{ + struct pci_dev *pf_dev = adapter->pdev; + struct pci_dev *vf_dev; + u32 sriov_cap; + u16 vf_id; + u8 vf_idx = 0; + + sriov_cap = pci_find_ext_capability(pf_dev, PCI_EXT_CAP_ID_SRIOV); + if (!sriov_cap) { + LOG_WARN_BDF("sriov capability not found.\n"); + goto l_out; + } + + pci_read_config_word(pf_dev, sriov_cap + PCI_SRIOV_VF_DID, &vf_id); + + vf_dev = pci_get_device(pf_dev->vendor, vf_id, NULL); + LOG_INFO_BDF("vf dev id:0x%x pci dev:0x%pK.\n", vf_id, vf_dev); + + while (vf_dev) { + if (vf_dev->is_virtfn && + (vf_dev->physfn == pf_dev) && + (vf_idx < adapter->vt_ctxt.num_vfs)) { + pci_dev_get(vf_dev); + adapter->vt_ctxt.vf_info[vf_idx].vf_dev = vf_dev; + vf_idx++; + } + + vf_dev = pci_get_device(pf_dev->vendor, vf_id, vf_dev); + } + +l_out: + return; +} + +static void sxe_sriov_ring_reinit(struct sxe_adapter *adapter) +{ + sxe_ring_reassign(adapter, sxe_dcb_tc_get(adapter)); + + return; +} + +static s32 sxe_sriov_enable_prepare(struct sxe_adapter *adapter, + u8 num_vfs) +{ + u8 pools_used; + u8 tc; + u8 max; + s32 ret = 0; + struct sxe_pool_feature *pool = &adapter->pool_f; + + if (adapter->xdp_prog) { + ret = -EINVAL; +
LOG_MSG_ERR(probe, "num_vfs:%d sriov not support xdp.\n", num_vfs); + goto l_out; + } + + tc = sxe_dcb_tc_get(adapter); + pools_used = bitmap_weight(adapter->vt_ctxt.pf_pool_bitmap, pool->pf_num_used); + + max = (tc > 4) ? SXE_MAX_VFS_8TC : + (tc > 1) ? SXE_MAX_VFS_4TC : SXE_MAX_VFS_1TC; + + if (num_vfs > (max - pools_used)) { + LOG_DEV_ERR("tc:%d pool_used:%d num_vfs:%d exceed max vfs:%d\n", + tc, pools_used, num_vfs, max); + ret = -EPERM; + goto l_out; + } + +l_out: + return ret; +} + +static s32 sxe_sriov_disable(struct pci_dev *pdev) +{ + struct sxe_adapter *adapter = pci_get_drvdata(pdev); + u32 assigned = pci_vfs_assigned(pdev); + u32 cap = adapter->cap; + u8 vf = pci_num_vf(adapter->pdev); + s32 ret = 0; + + if (!(adapter->cap & SXE_SRIOV_ENABLE)) { + LOG_INFO_BDF("vf:%d sriov has been disabled.\n", pci_num_vf(pdev)); + goto l_out; + } + + if (assigned) { + ret = -EPERM; + LOG_DEV_ERR("%d vf assigned to guest, can't disable sriov.(err:%d)\n", + assigned, ret); + goto l_out; + } + + pci_disable_sriov(adapter->pdev); + + + rtnl_lock(); + + sxe_vf_resource_release(adapter); + + sxe_vf_disable(adapter); + + if ((cap != adapter->cap) || + (vf != pci_num_vf(adapter->pdev))) { + LOG_INFO_BDF("previous cap:0x%x vf:%d, now cap:0x%x vf:%d changed.\n", + cap, vf, adapter->cap, pci_num_vf(adapter->pdev)); + sxe_sriov_ring_reinit(adapter); + } + + rtnl_unlock(); + +l_out: + return ret; +} + +static s32 sxe_sriov_enable(struct pci_dev *pdev, u8 num_vfs) +{ + struct sxe_adapter *adapter = pci_get_drvdata(pdev); + u8 vf_current = pci_num_vf(adapter->pdev); + s32 ret; + + if (vf_current == num_vfs) { + ret = num_vfs; + LOG_INFO_BDF("existed %u vfs, skip dup create.\n", num_vfs); + goto l_out; + } + + if (vf_current) { + sxe_vf_exit(adapter); + } + + rtnl_lock(); + + ret = sxe_sriov_enable_prepare(adapter, num_vfs); + if (ret) { + LOG_ERROR_BDF("num_vfs:%d prepare fail.(err:%d)", num_vfs, ret); + goto l_unlock; + } + + ret = sxe_vf_init(adapter, num_vfs); + if (ret) { + LOG_ERROR_BDF("sxe vf init fail.(err:%d)\n", ret); + goto l_unlock; + } + + sxe_sriov_ring_reinit(adapter); + + rtnl_unlock(); + + if (adapter->cap & SXE_SRIOV_ENABLE) { + ret = pci_enable_sriov(pdev, num_vfs); + if (ret) { + LOG_MSG_ERR(probe, "num_vfs:%d enable pci sriov fail.(err:%d)\n", + num_vfs, ret); + goto l_vf_free; + } + + sxe_vf_dev_info_get(adapter); + + LOG_INFO_BDF("cap:0x%x cap2:0x%x num_vfs:%d enable sriov success.\n", + adapter->cap, adapter->cap2, num_vfs); + + ret = num_vfs; + } else { + ret = -EPERM; + LOG_ERROR_BDF("num_vfs:%d driver sriov is disabled, " + "can't enable sriov.(err:%d)\n", + num_vfs, ret); + goto l_vf_free; + } + + return ret; + +l_unlock: + rtnl_unlock(); + +l_out: + return ret; + +l_vf_free: + sxe_vf_exit(adapter); + return ret; +} + +#ifdef HAVE_NO_PCIE_FLR +static inline void sxe_issue_vf_flr(struct sxe_adapter *adapter, + struct pci_dev *vf_dev) +{ + int pos, i; + u16 status; + + for (i = 0; i < 4; i++) { + if (i) + msleep((1 << (i - 1)) * 100); + + pcie_capability_read_word(vf_dev, PCI_EXP_DEVSTA, &status); + if (!(status & PCI_EXP_DEVSTA_TRPND)) + goto clear; + } + + LOG_DEV_WARN("Issuing VFLR with pending transactions\n"); + +clear: + pos = pci_find_capability(vf_dev, PCI_CAP_ID_EXP); + if (!pos) + return; + + LOG_DEV_ERR("Issuing VFLR for VF %s\n", pci_name(vf_dev)); + pci_write_config_word(vf_dev, pos + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR); + msleep(100); +} +#endif + +void sxe_bad_vf_flr(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + struct pci_dev *pdev = 
adapter->pdev; + u32 num = hw->stat.ops->tx_packets_num_get(hw); + unsigned long flags; + u32 vf_idx; + + if (!netif_carrier_ok(adapter->netdev)) { + LOG_DEBUG_BDF("no need check vf status.\n"); + goto l_out; + } + + if (num) { + LOG_DEBUG_BDF("no need vf flr tx good packets num:%u.\n", num); + goto l_out; + } + + if (!pdev) { + goto l_out; + } + + spin_lock_irqsave(&adapter->vt_ctxt.vfs_lock, flags); + for (vf_idx = 0; vf_idx < adapter->vt_ctxt.num_vfs; vf_idx++) { + struct pci_dev *vf_dev = adapter->vt_ctxt.vf_info[vf_idx].vf_dev; + u16 status_reg; + + LOG_INFO_BDF("num_vfs:%u vf_idx:%u pci dev:0x%pK.\n", + adapter->vt_ctxt.num_vfs, + vf_idx, vf_dev); + + if (!vf_dev) + continue; + pci_read_config_word(vf_dev, PCI_STATUS, &status_reg); + if (status_reg != SXE_READ_CFG_WORD_FAILED && + status_reg & PCI_STATUS_REC_MASTER_ABORT) { +#ifdef HAVE_NO_PCIE_FLR + sxe_issue_vf_flr(adapter, vf_dev); +#else + pcie_flr(vf_dev); +#endif + LOG_WARN_BDF("vf_idx:%u status_reg:0x%x pcie flr.\n", + vf_idx, status_reg); + } + } + spin_unlock_irqrestore(&adapter->vt_ctxt.vfs_lock, flags); + +l_out: + return; +} + +void sxe_spoof_packets_check(struct sxe_adapter *adapter) +{ + u32 num; + struct sxe_hw *hw = &adapter->hw; + + if (adapter->vt_ctxt.num_vfs == 0) { + goto l_out; + } + + num = hw->stat.ops->unsecurity_packets_num_get(hw); + if (!num) { + goto l_out; + } + + LOG_MSG_WARN(drv, "%u spoof packets detected.\n", num); + +l_out: + return; +} + +void sxe_param_sriov_enable(struct sxe_adapter *adapter, u8 user_num_vfs) +{ + u8 vf_current = pci_num_vf(adapter->pdev); + s32 ret = 0; + u8 num_vfs; + + if (vf_current) { + num_vfs = vf_current; + LOG_DEV_WARN("virtual functions already enabled for this device " + "- please reload all VF drivers to avoid spoofed packet errors\n"); + } else { + num_vfs = user_num_vfs; + } + + if (num_vfs == 0) { + goto l_end; + } + + if (!vf_current) { + ret = pci_enable_sriov(adapter->pdev, num_vfs); + if (ret) { + LOG_MSG_ERR(probe, "enable %u vfs failed.(err:%d)\n", + num_vfs, ret); + goto l_vf_exit; + } + } + + ret = sxe_vf_init(adapter, num_vfs); + if (ret) { + LOG_MSG_ERR(probe, "init %u vfs failed.(err:%d)\n", num_vfs, ret); + goto l_end; + } + + sxe_vf_dev_info_get(adapter); + + LOG_INFO_BDF("max_vfs:%u fact assign %u vfs.\n", max_vfs, num_vfs); + +l_end: + return; + +l_vf_exit: + sxe_vf_exit(adapter); + return; +} + +#else + +static s32 sxe_sriov_enable(struct pci_dev *pdev, u8 num_vfs) +{ + return 0; +} + +void sxe_vf_resource_release(struct sxe_adapter *adapter) +{ + return; +} + +static s32 sxe_sriov_disable(struct pci_dev *pdev) +{ + struct sxe_adapter *adapter = pci_get_drvdata(pdev); + s32 ret = 0; + + if (!(adapter->cap & SXE_SRIOV_ENABLE)) { + LOG_INFO_BDF("vf:%d sriov has been disabled.\n", pci_num_vf(pdev)); + goto l_out; + } + + rtnl_lock(); + sxe_vf_disable(adapter); + rtnl_unlock(); + +l_out: + return ret; +} + +void sxe_sriov_init(struct sxe_adapter *adapter) +{ + return; +} + +void sxe_vf_exit(struct sxe_adapter *adapter) +{ + return; +} + +void sxe_vf_down(struct sxe_adapter *adapter) +{ + return; +} + +void sxe_bad_vf_flr(struct sxe_adapter *adapter) +{ + return; +} + +void sxe_spoof_packets_check(struct sxe_adapter *adapter) +{ + return; +} + +#endif + +s32 sxe_sriov_configure(struct pci_dev *pdev, s32 num_vfs) +{ + s32 ret; + + if (num_vfs) { + ret = sxe_sriov_enable(pdev, num_vfs); + } else { + ret = sxe_sriov_disable(pdev); + } + + LOG_INFO("%s num_vfs:%d sriov operation done.(ret:%d)\n", + dev_name(&pdev->dev), num_vfs, ret); + + return ret; +} + 
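The pool-budget check in sxe_sriov_enable_prepare() above caps the requested VF count by what is left over once DCB traffic classes and the PF's own pools are accounted for: more than four TCs leave room for at most 16 VFs, two to four TCs for 32, and a single TC for 64 (the SXE_MAX_VFS_* constants defined in sxe_sriov.h below). The following is a minimal standalone sketch of that arithmetic only, not driver code; the bitmap_weight() call over the PF pool bitmap is replaced here by a plain integer count.

/* Standalone userspace sketch of the VF budget check made in
 * sxe_sriov_enable_prepare(); constants copied from sxe_sriov.h. */
#include <stdio.h>

#define SXE_VF_FUNCTION_MAX	64
#define SXE_MAX_VFS_1TC		SXE_VF_FUNCTION_MAX
#define SXE_MAX_VFS_4TC		32
#define SXE_MAX_VFS_8TC		16

static int sxe_vf_budget_check(unsigned int tc, unsigned int pools_used,
			       unsigned int num_vfs)
{
	/* More traffic classes consume more pools, so fewer remain for VFs. */
	unsigned int max = (tc > 4) ? SXE_MAX_VFS_8TC :
			   (tc > 1) ? SXE_MAX_VFS_4TC : SXE_MAX_VFS_1TC;

	return (num_vfs > max - pools_used) ? -1 : 0;
}

int main(void)
{
	/* 8 TCs with one PF pool in use: 15 VFs fit, 16 do not. */
	printf("tc=8 pools=1 vfs=15 -> %d\n", sxe_vf_budget_check(8, 1, 15));
	printf("tc=8 pools=1 vfs=16 -> %d\n", sxe_vf_budget_check(8, 1, 16));
	return 0;
}

With eight TCs and one pool already taken by the PF, the first call passes and the second corresponds to the -EPERM path above.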
diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_sriov.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_sriov.h new file mode 100644 index 000000000000..b776d3e128ba --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_sriov.h @@ -0,0 +1,232 @@ +#ifndef __SXE_SRIOV_H__ +#define __SXE_SRIOV_H__ + +#include "sxe.h" + +#define SXE_VF_FUNCTION_MAX 64 + +#define SXE_VF_DRV_MAX (SXE_VF_FUNCTION_MAX - 1) + +#define SXE_MAX_VFS_1TC SXE_VF_FUNCTION_MAX +#define SXE_MAX_VFS_4TC 32 +#define SXE_MAX_VFS_8TC 16 + +#define SXE_MSG_NUM(size) DIV_ROUND_UP(size, 4) + +#define SXE_MSGTYPE_ACK 0x80000000 +#define SXE_MSGTYPE_NACK 0x40000000 + +#define SXE_VFREQ_RESET 0x01 +#define SXE_VFREQ_MAC_ADDR_SET 0x02 +#define SXE_VFREQ_MC_ADDR_SYNC 0x03 +#define SXE_VFREQ_VLAN_SET 0x04 +#define SXE_VFREQ_LPE_SET 0x05 + +#define SXE_VFREQ_UC_ADDR_SYNC 0x06 + +#define SXE_VFREQ_API_NEGOTIATE 0x08 + +#define SXE_VFREQ_RING_INFO_GET 0x09 +#define SXE_VFREQ_REDIR_TBL_GET 0x0a +#define SXE_VFREQ_RSS_KEY_GET 0x0b +#define SXE_VFREQ_CAST_MODE_SET 0x0c +#define SXE_VFREQ_LINK_ENABLE_GET 0X0d +#define SXE_VFREQ_IPSEC_ADD 0x0e +#define SXE_VFREQ_IPSEC_DEL 0x0f +#define SXE_VFREQ_RSS_CONF_GET 0x10 + +#define SXE_VFREQ_MASK 0xFF + +#define SXE_CTRL_MSG_LINK_UPDATE 0x100 +#define SXE_CTRL_MSG_NETDEV_DOWN 0x200 + +#define SXE_CTRL_MSG_REINIT 0x400 + +#define SXE_PF_CTRL_MSG_MASK 0x700 +#define SXE_PFREQ_MASK 0xFF00 + +#define SXE_VF_MC_ADDR_NUM_SHIFT 16 + +#define SXE_VFREQ_MSGINFO_SHIFT 16 +#define SXE_VFREQ_MSGINFO_MASK (0xFF << SXE_VFREQ_MSGINFO_SHIFT) + +#define SXE_RETA_ENTRIES_DWORDS (SXE_MAX_RETA_ENTRIES / 16) + +#define SXE_VF_DISABLE_WAIT 100 + +enum sxe_mbx_api_version { + SXE_MBX_API_10 = 0, + SXE_MBX_API_11, + SXE_MBX_API_12, + SXE_MBX_API_13, + SXE_MBX_API_14, + + SXE_MBX_API_NR, +}; + +enum sxe_cast_mode { + SXE_CAST_MODE_NONE = 0, + SXE_CAST_MODE_MULTI, + SXE_CAST_MODE_ALLMULTI, + SXE_CAST_MODE_PROMISC, +}; + +struct sxe_msg_table { + u32 msg_type; + s32 (*msg_func)(struct sxe_adapter *adapter, u32 *msg, u8 vf_idx); +}; + +struct sxe_mbx_api_msg { + u32 msg_type; + u32 api_version; +}; + +struct sxe_uc_addr_msg { + u32 msg_type; + u8 uc_addr[ETH_ALEN]; + u16 pad; +}; + +struct sxe_rst_rcv { + u32 msg_type; +}; + +struct sxe_rst_reply { + u32 msg_type; + u32 mac_addr[2]; + u32 mc_filter_type; + u32 sw_mtu; +}; + +struct sxe_rst_msg { + union { + struct sxe_rst_rcv rcv; + struct sxe_rst_reply reply; + }; +}; + +struct sxe_ring_info_msg { + u32 msg_type; + u8 max_rx_num; + u8 max_tx_num; + u8 tc_num; + u8 default_tc; +}; + +struct sxe_mc_sync_msg { + u16 msg_type; + u16 mc_cnt; + u16 mc_addr_extract[SXE_VF_MC_ENTRY_NUM_MAX]; +}; + +struct sxe_uc_sync_msg { + u16 msg_type; + u16 index; + u32 addr[2]; +}; + +struct sxe_cast_mode_msg { + u32 msg_type; + u32 cast_mode; +}; + +struct sxe_redir_tbl_msg { + u32 type; + u32 entries[SXE_RETA_ENTRIES_DWORDS]; +}; + +struct sxe_rss_hsah_key_msg { + u32 type; + u8 hash_key[SXE_RSS_KEY_SIZE]; +}; + +struct sxe_rss_hash_msg { + u32 type; + u8 hash_key[SXE_RSS_KEY_SIZE]; + u64 rss_hf; +}; + +struct sxe_ipsec_add_msg { + u32 msg_type; + u32 pf_sa_idx; + __be32 spi; + u8 flags; + u8 proto; + u16 family; + __be32 ip_addr[4]; + u32 key[5]; +}; + +struct sxe_ipsec_del_msg { + u32 msg_type; + u32 pf_sa_idx; +}; + +struct sxe_link_enable_msg { + u32 msg_type; + bool link_enable; +}; + +s32 sxe_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto); + +#ifdef HAVE_NDO_SET_VF_LINK_STATE +s32 sxe_set_vf_link_state(struct net_device *netdev, s32 
vf_idx, s32 state); + +void sxe_vf_enable_and_reinit_notify_vf_all(struct sxe_adapter *adapter); +#endif + +void sxe_sriov_init(struct sxe_adapter *adapter); + +void sxe_vf_exit(struct sxe_adapter *adapter); + +s32 sxe_sriov_configure(struct pci_dev *pdev, int num_vfs); + +void sxe_vt1_configure(struct sxe_adapter *adapter); + +void sxe_mailbox_irq_handle(struct sxe_adapter *adapter); + +s32 sxe_set_vf_mac(struct net_device *dev, s32 vf_idx, u8 *mac_addr); + +s32 sxe_set_vf_spoofchk(struct net_device *dev, s32 vf_idx, bool status); + +s32 sxe_set_vf_trust(struct net_device *dev, + s32 vf_idx, bool status); + +int sxe_set_vf_rss_query_en(struct net_device *dev, s32 vf_idx, bool status); + +s32 sxe_get_vf_config(struct net_device *dev, s32 vf_idx, + struct ifla_vf_info *info); + +s32 sxe_set_vf_rate(struct net_device *netdev, s32 vf_idx, + s32 min_rate, s32 max_rate); + +s32 sxe_vf_req_task_handle(struct sxe_adapter *adapter, u8 vf_idx); + +void sxe_vf_ack_task_handle(struct sxe_adapter *adapter, u8 vf_idx); + +void sxe_vf_hw_rst(struct sxe_adapter *adapter, u8 vf_idx); + +void sxe_vf_down(struct sxe_adapter *adapter); + +void sxe_bad_vf_flr(struct sxe_adapter *adapter); + +void sxe_spoof_packets_check(struct sxe_adapter *adapter); + +bool sxe_vf_tx_pending(struct sxe_adapter *adapter); + +void sxe_vf_rate_update(struct sxe_adapter *adapter); + +void sxe_link_update_notify_vf_all(struct sxe_adapter *adapter); + +void sxe_netdev_down_notify_vf_all(struct sxe_adapter *adapter); + +void sxe_vf_trust_update_notify(struct sxe_adapter *adapter, u16 index); + +void sxe_param_sriov_enable(struct sxe_adapter *adapter, u8 user_num_vfs); + +void sxe_vf_resource_release(struct sxe_adapter *adapter); + +void sxe_vf_disable(struct sxe_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_tx_proc.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_tx_proc.c new file mode 100644 index 000000000000..91aba6d625ae --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_tx_proc.c @@ -0,0 +1,1690 @@ +#include +#include +#include +#include +#include +#include + +#include "sxe.h" +#ifdef HAVE_NO_OVERFLOW_H +#include +#else +#include +#endif +#include "sxe_log.h" +#include "sxe_tx_proc.h" +#include "sxe_irq.h" +#include "sxe_hw.h" +#include "sxe_pci.h" +#include "sxe_debug.h" +#include "sxe_csum.h" +#include "sxe_ptp.h" +#include "sxe_monitor.h" +#include "sxe_filter.h" +#include "sxe_ipsec.h" +#include "sxe_xdp.h" + +#ifdef SXE_DRIVER_TRACE +#include "sxe_trace.h" +#endif + +#define SXE_SKB_MIN_LEN 17 + +#ifdef NEED_SKB_FRAG_SIZE_API +#define skb_frag_size(frag) skb_frag_size_compat(frag) +static inline unsigned int skb_frag_size_compat(const skb_frag_t *frag) +{ + return frag->size; +} +#endif + +void sxe_tx_ring_buffer_clean(struct sxe_ring *ring) +{ + union sxe_tx_data_desc *eop_desc, *tx_desc; + u16 ntc = ring->next_to_clean; + struct sxe_tx_buffer *tx_buffer = &ring->tx_buffer_info[ntc]; +#ifdef HAVE_AF_XDP_ZERO_COPY + if (ring->xsk_pool) { + sxe_xsk_tx_ring_clean(ring); + goto l_out; + } +#endif + while (ntc != ring->next_to_use) { +#ifdef HAVE_XDP_SUPPORT + if (ring_is_xdp(ring)) { + xdp_return_frame(tx_buffer->xdpf); + } else { + dev_kfree_skb_any(tx_buffer->skb); + } +#else + dev_kfree_skb_any(tx_buffer->skb); +#endif + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + + eop_desc = tx_buffer->next_to_watch; + tx_desc = SXE_TX_DESC(ring, ntc); + + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + 
ntc++; + if (unlikely(ntc == ring->depth)) { + ntc = 0; + tx_buffer = ring->tx_buffer_info; + tx_desc = SXE_TX_DESC(ring, 0); + } + + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + } + + tx_buffer++; + ntc++; + if (unlikely(ntc == ring->depth)) { + ntc = 0; + tx_buffer = ring->tx_buffer_info; + } + } + +#ifdef HAVE_XDP_SUPPORT + if (!ring_is_xdp(ring)) +#endif + netdev_tx_reset_queue(netdev_get_tx_queue(ring->netdev, ring->idx)); +#ifdef HAVE_AF_XDP_ZERO_COPY +l_out: +#endif + ring->next_to_use = 0; + ring->next_to_clean = 0; + return; +} + +void sxe_tx_ring_free(struct sxe_ring *ring) +{ + sxe_tx_ring_buffer_clean(ring); + + if (ring->tx_buffer_info) { + vfree(ring->tx_buffer_info); + ring->tx_buffer_info = NULL; + } + + if (ring->desc.base_addr) { + dma_free_coherent(ring->dev, ring->size, + ring->desc.base_addr, ring->desc.dma); + ring->desc.base_addr = NULL; + } + + return ; +} + +void sxe_tx_resources_free(struct sxe_adapter *adapter) +{ + u32 i; + struct sxe_ring **tx_ring = adapter->tx_ring_ctxt.ring; + struct sxe_ring **xdp_ring = adapter->xdp_ring_ctxt.ring; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + if (tx_ring[i]->desc.base_addr) { + sxe_tx_ring_free(tx_ring[i]); + } + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + if (xdp_ring[i]->desc.base_addr) { + sxe_tx_ring_free(xdp_ring[i]); + } + } + + return; +} + +s32 sxe_tx_ring_alloc(struct sxe_ring *ring) +{ + s32 ret; + s32 node = NUMA_NO_NODE; + u32 size = sizeof(struct sxe_tx_buffer) * ring->depth; + struct device *dev = ring->dev; + s32 orig_node = dev_to_node(dev); + struct sxe_adapter *adapter = netdev_priv(ring->netdev); + + if (ring->irq_data) { + node = ring->irq_data->numa_node; + } + + ring->tx_buffer_info = vmalloc_node(size, node); + if (!ring->tx_buffer_info) { + ring->tx_buffer_info = vmalloc(size); + if (!ring->tx_buffer_info) { + ret = -ENOMEM; + goto l_err; + } + } + + ring->size = ring->depth * sizeof(union sxe_tx_data_desc); + ring->size = ALIGN(ring->size, SXE_ALIGN_4K); + + set_dev_node(dev, node); + ring->desc.base_addr = dma_alloc_coherent(dev, ring->size, + &ring->desc.dma, GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!ring->desc.base_addr) { + ring->desc.base_addr = dma_alloc_coherent(dev, ring->size, + &ring->desc.dma, GFP_KERNEL); + if (!ring->desc.base_addr) { + ret = -ENOMEM; + goto l_free; + } + } + + ring->next_to_use = 0; + ring->next_to_clean = 0; + return 0; + +l_free: + vfree(ring->tx_buffer_info); + ring->tx_buffer_info = NULL; +l_err: + LOG_DEV_ERR("unable to allocate memory for the Tx descriptor ring\n"); + return ret; +} + +static s32 sxe_tx_resources_alloc(struct sxe_adapter *adapter) +{ + s32 ret; + u32 i, j; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + ret = sxe_tx_ring_alloc(adapter->tx_ring_ctxt.ring[i]); + if (ret < 0) { + LOG_MSG_ERR(probe, "allocation for Tx Queue %d failed\n", i); + goto l_tx_free; + } + } + + for (j = 0; j < adapter->xdp_ring_ctxt.num; j++) { + ret = sxe_tx_ring_alloc(adapter->xdp_ring_ctxt.ring[j]); + if (ret < 0) { + LOG_MSG_ERR(probe, "allocation for xdp Queue %d failed\n", j); + goto l_xdp_free; + } + } + + return 0; + +l_xdp_free: + while (j--) { + sxe_tx_ring_free(adapter->xdp_ring_ctxt.ring[j]); + } + +l_tx_free: + while (i--) { + sxe_tx_ring_free(adapter->tx_ring_ctxt.ring[i]); + } + + return ret; +} + +s32 sxe_tx_ring_depth_reset(struct sxe_adapter *adapter, u32 tx_cnt) +{ + s32 ret; + u32 i, j, tx_ring_cnt; + 
struct sxe_ring *temp_ring; + struct sxe_ring **tx_ring = adapter->tx_ring_ctxt.ring; + struct sxe_ring **xdp_ring = adapter->xdp_ring_ctxt.ring; + + tx_ring_cnt = adapter->tx_ring_ctxt.num + adapter->xdp_ring_ctxt.num; + temp_ring = vmalloc(array_size(tx_ring_cnt, sizeof(struct sxe_ring))); + if (!temp_ring) { + LOG_ERROR_BDF("vmalloc failed, size=%lu\n", + array_size(tx_ring_cnt, sizeof(struct sxe_ring))); + ret = -ENOMEM; + goto l_end; + } + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + memcpy(&temp_ring[i], tx_ring[i], sizeof(struct sxe_ring)); + temp_ring[i].depth = tx_cnt; + ret = sxe_tx_ring_alloc(&temp_ring[i]); + if (ret < 0) { + LOG_ERROR_BDF("tx ring alloc failed, tx ring idx=%d\n", i); + goto l_tx_free; + } + } + + for (j = 0; j < adapter->xdp_ring_ctxt.num; j++, i++) { + memcpy(&temp_ring[i], xdp_ring[j], sizeof(struct sxe_ring)); + temp_ring[i].depth = tx_cnt; + ret = sxe_tx_ring_alloc(&temp_ring[i]); + if (ret < 0) { + LOG_ERROR_BDF("xdp ring alloc failed, xdp ring idx=%d\n", j); + goto l_tx_free; + } + } + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + sxe_tx_ring_free(tx_ring[i]); + memcpy(tx_ring[i], &temp_ring[i], sizeof(struct sxe_ring)); + } + + for (j = 0; j < adapter->xdp_ring_ctxt.num; j++, i++) { + sxe_tx_ring_free(xdp_ring[j]); + memcpy(xdp_ring[j], &temp_ring[i], sizeof(struct sxe_ring)); + } + + adapter->tx_ring_ctxt.depth = tx_cnt; + adapter->xdp_ring_ctxt.depth = tx_cnt; + ret = 0; + goto l_temp_free; + +l_tx_free: + while (i--) { + sxe_tx_ring_free(&temp_ring[i]); + } + +l_temp_free: + vfree(temp_ring); + +l_end: + return ret; +} + +static void sxe_tx_ring_fnav_configure(struct sxe_adapter *adapter, + struct sxe_ring *ring) +{ + if (adapter->cap & SXE_FNAV_SAMPLE_ENABLE) { + ring->fnav_sample_rate = adapter->fnav_ctxt.sample_rate; + ring->fnav_sample_count = 0; + set_bit(SXE_TX_FNAV_INIT_DONE, &ring->state); + } else { + ring->fnav_sample_rate = 0; + } + + return; +} + +void sxe_tx_ring_reg_configure(struct sxe_adapter *adapter, + struct sxe_ring *ring) +{ + u32 host_thresh, prefetch_thresh; + u32 reg_idx = ring->reg_idx; + struct sxe_hw *hw = &adapter->hw; + u32 dma_len = ring->depth * sizeof(union sxe_tx_data_desc); + u32 wb_thresh = 0; + + hw->dma.ops->tx_ring_desc_configure(hw, dma_len, + (u64)ring->desc.dma, reg_idx); + ring->desc.tail = adapter->hw.reg_base_addr + SXE_TDT(reg_idx); + + if (!ring->irq_data || \ + ring->irq_data->irq_interval < SXE_IRQ_ITR_100K) { + if (adapter->irq_ctxt.rx_irq_interval) { + wb_thresh = SXE_TX_DESC_PREFETCH_THRESH_8; + } + } else { + wb_thresh = SXE_TX_DESC_PREFETCH_THRESH_8; + } + host_thresh = SXE_TX_DESC_HOST_THRESH; + prefetch_thresh = SXE_TX_DESC_WRITEBACK_THRESH; + hw->dma.ops->tx_desc_thresh_set(hw, reg_idx, + wb_thresh, host_thresh, prefetch_thresh); + + hw->dma.ops->tx_ring_tail_init(hw, reg_idx); + + hw->dma.ops->tx_ring_switch(hw, reg_idx, true); + return; +} + +static inline bool sxe_is_xdp_use_extra_pkg_buf(struct sxe_adapter *adapter) +{ + u16 num = adapter->tx_ring_ctxt.num + adapter->xdp_ring_ctxt.num; + + if (adapter->xdp_prog && (num > SXE_FNAV_RING_NUM_MAX)) { + return true; + } + + return false; +} + +static void sxe_hw_tx_buf_configure(struct sxe_adapter *adapter) +{ + u8 tcs = sxe_dcb_tc_get(adapter); + struct sxe_hw *hw = &adapter->hw; + + if (sxe_is_xdp_use_extra_pkg_buf(adapter)) { + LOG_INFO_BDF("nr cpu=%u, xdp use rings large then 64\n", nr_cpu_ids); + tcs = 4; + } + + hw->dbu.ops->tx_pkt_buf_switch(hw, false); + + hw->dbu.ops->tx_pkt_buf_size_configure(hw, tcs); + + if 
(adapter->cap & SXE_DCB_ENABLE) { + hw->dma.ops->tx_pkt_buf_thresh_configure(hw, tcs, true); + } else { + hw->dma.ops->tx_pkt_buf_thresh_configure(hw, tcs, false); + } + + hw->dbu.ops->tx_pkt_buf_switch(hw, true); + + return; +} + +static void sxe_tx_multi_ring_configure(struct sxe_adapter *adapter) +{ + struct sxe_hw *hw = &adapter->hw; + u8 tcs = sxe_dcb_tc_get(adapter); + bool enable = !!(adapter->cap & SXE_SRIOV_ENABLE); + u16 mask = sxe_pool_mask_get(adapter); + u16 num = adapter->tx_ring_ctxt.num + adapter->xdp_ring_ctxt.num; + + hw->dma.ops->tx_multi_ring_configure(hw, tcs, + mask, enable, num); + + return; +} + +static void sxe_tx_ring_xps_set(struct sxe_ring *ring) +{ + if (!test_and_set_bit(SXE_TX_XPS_INIT_DONE, &ring->state)) { + struct sxe_irq_data *irq_data = ring->irq_data; + + if (irq_data) { + netif_set_xps_queue(ring->netdev, + &irq_data->affinity_mask, + ring->idx); + } + } + + return; +} + +void sxe_tx_ring_attr_configure(struct sxe_adapter *adapter, + struct sxe_ring *ring) +{ +#ifdef HAVE_AF_XDP_ZERO_COPY + ring->xsk_pool = NULL; + if (ring_is_xdp(ring)) { + LOG_DEBUG_BDF("ring[%u] is xdp and begin get xsk pool\n", ring->idx); + ring->xsk_pool = sxe_xsk_pool_get(adapter, ring); + } +#endif + sxe_tx_ring_xps_set(ring); + clear_bit(SXE_HANG_CHECK_ARMED, &ring->state); + sxe_tx_buffer_init(ring); + + sxe_tx_ring_fnav_configure(adapter, ring); + + sxe_tx_ring_reg_configure(adapter, ring); + return; +} + +void sxe_hw_tx_configure(struct sxe_adapter *adapter) +{ + u32 i; + struct sxe_hw *hw = &adapter->hw; + + sxe_tx_multi_ring_configure(adapter); + + sxe_hw_tx_buf_configure(adapter); + + hw->mac.ops->pad_enable(hw); + + hw->dma.ops->tx_enable(hw); + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + sxe_tx_ring_attr_configure(adapter, adapter->tx_ring_ctxt.ring[i]); + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++){ + sxe_tx_ring_attr_configure(adapter, adapter->xdp_ring_ctxt.ring[i]); + } + + return; +} + +s32 sxe_test_tx_configure(struct sxe_adapter *adapter, struct sxe_ring *ring) +{ + s32 ret; + struct sxe_hw *hw = &adapter->hw; + + ring->depth = SXE_DEFAULT_DESC_CNT; + ring->idx = 0; + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->reg_idx = adapter->tx_ring_ctxt.ring[0]->reg_idx; + + ret = sxe_tx_ring_alloc(ring); + if (ret) { + LOG_ERROR_BDF("test tx ring alloc failed, ret=%d", ret); + goto l_end; + } + + sxe_hw_tx_buf_configure(adapter); + + hw->dma.ops->tx_enable(hw); + + sxe_tx_ring_xps_set(ring); + clear_bit(SXE_HANG_CHECK_ARMED, &ring->state); + sxe_tx_buffer_init(ring); + + sxe_tx_ring_reg_configure(adapter, ring); + + hw->dma.ops->tx_desc_wb_thresh_clear(hw, ring->reg_idx); + +l_end: + return ret; +} + +s32 sxe_tx_configure(struct sxe_adapter *adapter) +{ + s32 ret; + + ret = sxe_tx_resources_alloc(adapter); + if (ret) { + LOG_ERROR_BDF("tx ring init failed, ret = %d\n", ret); + goto l_err; + } + + sxe_hw_tx_configure(adapter); + + ret = netif_set_real_num_tx_queues(adapter->netdev, + adapter->tx_ring_ctxt.num); + if (ret) { + LOG_ERROR_BDF("netif_set_real_num_tx_queues failed, ret = %d\n", ret); + sxe_tx_resources_free(adapter); + } + +l_err: + return ret; +} + +static void sxe_tx_ctxt_desc_set(struct sxe_ring *tx_ring, + struct sxe_tx_context_desc *ctxt_desc) +{ + struct sxe_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + struct sxe_adapter *adapter = netdev_priv(tx_ring->netdev); + + context_desc = SXE_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->depth) ? 
i : 0; + + ctxt_desc->type_tucmd_mlhl |= SXE_TXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(ctxt_desc->vlan_macip_lens); + context_desc->sa_idx = cpu_to_le32(ctxt_desc->sa_idx); + context_desc->type_tucmd_mlhl = cpu_to_le32(ctxt_desc->type_tucmd_mlhl); + context_desc->mss_l4len_idx = cpu_to_le32(ctxt_desc->mss_l4len_idx); + + LOG_DEBUG_BDF("contxt desc, ring=%u, ntu=%u, ntc=%u, vlan_macip_lens=%#x, " + "sa_idx=%#x, type_tucmd_mlhl=%#x, mss_l4len_idx=%x\n", + tx_ring->idx, tx_ring->next_to_use, tx_ring->next_to_clean, + context_desc->vlan_macip_lens, + context_desc->sa_idx, + context_desc->type_tucmd_mlhl, + context_desc->mss_l4len_idx); + return; +} + +STATIC s32 sxe_tso(struct sxe_ring *tx_ring, + struct sxe_tx_buffer *first_buff, + struct sxe_tx_context_desc *ctxt_desc, + u8 *hdr_len) +{ + s32 ret; + u16 tucmd; + union sxe_ip_hdr ip; + union sxe_l4_hdr l4; + u8 *csum_start, *trans_start; + u32 mss_l4len, paylen, l4_offset, len; + struct sk_buff *skb = first_buff->skb; + struct sxe_adapter *adapter = netdev_priv(tx_ring->netdev); + + if (skb->ip_summed != CHECKSUM_PARTIAL || !skb_is_gso(skb)) { + ret = 0; + goto l_end; + } + + LOG_DEBUG_BDF("tso start, ring[%d]\n", tx_ring->idx); + + ret = skb_cow_head(skb, 0); + if (ret < 0) { + LOG_ERROR_BDF("skb cow head failed, ret=%d\n", ret); + goto l_end; + } + + if (eth_p_mpls(first_buff->protocol)) { + ip.hdr = skb_inner_network_header(skb); + } else { + ip.hdr = skb_network_header(skb); + } + + tucmd = SXE_TX_CTXTD_TUCMD_L4T_TCP; + + if (ip.v4->version == SXE_IPV4) { + csum_start = skb_checksum_start(skb); + trans_start = ip.hdr + (ip.v4->ihl * 4); + len = csum_start - trans_start; + + ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? + csum_fold(csum_partial(trans_start, + len, 0)) : 0; + + LOG_INFO_BDF("tso ipv4 ip.v4->check=%u, gso_type=%x\n", + ip.v4->check, skb_shinfo(skb)->gso_type); + tucmd |= SXE_TX_CTXTD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first_buff->tx_features |= SXE_TX_FEATURE_TSO | + SXE_TX_FEATURE_CSUM | + SXE_TX_FEATURE_IPV4; + } else { + ip.v6->payload_len = 0; + first_buff->tx_features |= SXE_TX_FEATURE_TSO | + SXE_TX_FEATURE_CSUM; + } + + l4.hdr = skb_checksum_start(skb); + l4_offset = l4.hdr - skb->data; + + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); + + first_buff->gso_segs = skb_shinfo(skb)->gso_segs; + first_buff->bytecount += (first_buff->gso_segs - 1) * (*hdr_len); + + sxe_ctxt_desc_iplen_set(ctxt_desc, (l4.hdr - ip.hdr)); + sxe_ctxt_desc_maclen_set(ctxt_desc, (ip.hdr - skb->data)); + sxe_ctxt_desc_tucmd_set(ctxt_desc, tucmd); + mss_l4len = (*hdr_len - l4_offset) << SXE_TX_CTXTD_L4LEN_SHIFT; + mss_l4len |= skb_shinfo(skb)->gso_size << SXE_TX_CTXTD_MSS_SHIFT; + sxe_ctxt_desc_mss_l4len_set(ctxt_desc, mss_l4len); + + ret = 1; +l_end: + return ret; +} + +static inline u16 sxe_desc_unused_count(struct sxe_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->depth) + ntc - ntu - 1; +} + +STATIC s32 sxe_maybe_stop_tx(struct sxe_ring *ring, u16 size) +{ + s32 ret = 0; + + netif_stop_subqueue(ring->netdev, ring->idx); + + smp_mb(); + + if (likely(sxe_desc_unused_count(ring) < size)) { + ret = -EBUSY; + goto l_end; + } + + netif_start_subqueue(ring->netdev, ring->idx); + + ++ring->tx_stats.restart_queue; + +l_end: + return ret; +} + +STATIC netdev_tx_t sxe_ring_maybe_stop_tx(struct sk_buff *skb, + struct sxe_ring *tx_ring) +{ + u16 i, need_num; + netdev_tx_t ret = NETDEV_TX_OK; + u16 desc_cnt = SXE_TX_DESC_USE_COUNT(skb_headlen(skb)); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + desc_cnt += SXE_TX_DESC_USE_COUNT( + skb_frag_size(&skb_shinfo(skb)->frags[i])); + } + + need_num = desc_cnt + SXE_TX_NON_DATA_DESC_NUM; + + if (unlikely(sxe_desc_unused_count(tx_ring) < need_num)) { + if (sxe_maybe_stop_tx(tx_ring, need_num) < 0) { + ret = NETDEV_TX_BUSY; + } + + tx_ring->tx_stats.tx_busy++; + } + + return ret; +} + +static u32 sxe_tx_cmd_type(struct sk_buff *skb, u32 flags) +{ + u32 cmd_type = SXE_TX_DESC_TYPE_DATA | + SXE_TX_DESC_IFCS; + + cmd_type |= SXE_TX_SET_FLAG(flags, SXE_TX_FEATURE_HW_VLAN, + SXE_TX_DESC_VLE); + + cmd_type |= SXE_TX_SET_FLAG(flags, SXE_TX_FEATURE_TSTAMP, + SXE_TX_DESC_TSTAMP); + + cmd_type |= SXE_TX_SET_FLAG(flags, SXE_TX_FEATURE_TSO, + SXE_TXD_DCMD_TSE); + + cmd_type ^= SXE_TX_SET_FLAG(skb->no_fcs, 1, SXE_TX_DESC_IFCS); + + return cmd_type; +} + +static void sxe_tx_desc_offload_setup(u32 flags, + unsigned int paylen, + union sxe_tx_data_desc *tx_desc) +{ + u32 olinfo_status = paylen << SXE_TX_DESC_PAYLEN_SHIFT; + + olinfo_status |= SXE_TX_SET_FLAG(flags, + SXE_TX_FEATURE_CSUM, + SXE_TXD_POPTS_TXSM); + + olinfo_status |= SXE_TX_SET_FLAG(flags, + SXE_TX_FEATURE_IPV4, + SXE_TXD_POPTS_IXSM); + +#ifdef SXE_IPSEC_CONFIGURE + olinfo_status |= SXE_TX_SET_FLAG(flags, + SXE_TX_FEATURE_IPSEC, + SXE_TXD_POPTS_IPSEC); +#endif + + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + + return; +} + +static inline void sxe_tx_desc_update( + struct sxe_ring *ring, + union sxe_tx_data_desc **desc, + u16 *next_to_use) +{ + ++(*next_to_use); + ++(*desc); + if (ring->depth == *next_to_use) { + *desc = SXE_TX_DESC(ring, 0); + *next_to_use = 0; + } + (*desc)->read.olinfo_status = 0; + + return; +} + +static void sxe_tx_dma_err(struct sxe_ring *ring, + struct sxe_tx_buffer *first_buffer, + u16 next_to_use) +{ + struct sxe_tx_buffer *tx_buffer; + + for (;;) { + tx_buffer = &ring->tx_buffer_info[next_to_use]; + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev,dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + dma_unmap_len_set(tx_buffer, len, 0); + + if (tx_buffer == first_buffer) { + break; + } + + if (next_to_use == 0) { + next_to_use += ring->depth; + } + + --next_to_use; + } + + dev_kfree_skb_any(first_buffer->skb); + first_buffer->skb = NULL; + + ring->next_to_use = next_to_use; + + return; +} + +STATIC s32 sxe_tx_desc_setup(struct sxe_ring *ring, + struct sk_buff *skb, + struct sxe_tx_buffer *first_buffer, + union sxe_tx_data_desc **desc, + u16 *next_to_use) +{ + dma_addr_t dma; + skb_frag_t *frag; + u32 map_size = skb_headlen(skb); + u32 remaining_size = skb->data_len; + u32 cmd_type = sxe_tx_cmd_type(skb, first_buffer->tx_features); + struct sxe_tx_buffer *tx_buffer = first_buffer; + struct sxe_adapter *adapter = netdev_priv(ring->netdev); + + LOG_DEBUG_BDF("skb dma map start, line_size=%u," + " total_frag_len=%u, skb_len=%u\n", + skb_headlen(skb), skb->data_len, 
skb->len); + + dma = dma_map_single(ring->dev, skb->data, map_size, DMA_TO_DEVICE); + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(ring->dev, dma)) { + LOG_DEV_ERR("tx dma map failed\n"); + goto l_dma_err; + } + dma_unmap_len_set(tx_buffer, len, map_size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + (*desc)->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(map_size > SXE_DATA_PER_DESC_SIZE_MAX)) { + (*desc)->read.cmd_type_len = \ + cpu_to_le32(cmd_type ^ SXE_DATA_PER_DESC_SIZE_MAX); + + sxe_tx_desc_update(ring, desc, next_to_use); + dma += SXE_DATA_PER_DESC_SIZE_MAX; + map_size -= SXE_DATA_PER_DESC_SIZE_MAX; + + (*desc)->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!remaining_size)) { + cmd_type |= map_size | SXE_TX_DESC_CMD; + (*desc)->read.cmd_type_len = cpu_to_le32(cmd_type); + LOG_DEBUG_BDF("skb dma map, current_map_size=%u, remaining_size=%u, " + "desc_ptr=%p, dma_addr=%#llx, desc.buffer_addr = %#llx, " + "desc.cmdtype=%#x, desc.olinfo_status=%#x\n", + map_size, remaining_size, *desc, + (U64)dma, (*desc)->read.buffer_addr, + (*desc)->read.cmd_type_len, + (*desc)->read.olinfo_status); + break; + } + + (*desc)->read.cmd_type_len = cpu_to_le32(cmd_type ^ map_size); + + LOG_DEBUG_BDF("skb dma map, current_map_size=%u, remaining_size=%u, " + "desc_ptr=%p, dma_addr=%#llx, desc.buffer_addr = %#llx, " + "desc.cmdtype=%#x, desc.olinfo_status=%#x\n", + map_size, remaining_size, *desc, + (U64)dma, (*desc)->read.buffer_addr, + (*desc)->read.cmd_type_len, + (*desc)->read.olinfo_status); + + sxe_tx_desc_update(ring, desc, next_to_use); + + map_size = skb_frag_size(frag); + remaining_size -= map_size; + dma = skb_frag_dma_map(ring->dev, frag, 0, + map_size, DMA_TO_DEVICE); + + tx_buffer = &ring->tx_buffer_info[*next_to_use]; + } + + LOG_DEBUG_BDF("skb dma map end\n"); + return 0; +l_dma_err: + sxe_tx_dma_err(ring, first_buffer, *next_to_use); + return -ENOMEM; +} + +#ifdef NETDEV_XMIT_MORE_WORK_AROUND +#define netdev_xmit_more_workaround() (skb->xmit_more) +#endif + +STATIC s32 sxe_xmit_pkt(struct sxe_ring *ring, + struct sxe_tx_buffer *first_buffer, + const u8 hdr_len) +{ + s32 ret; + struct netdev_queue *queue; + struct sk_buff *skb = first_buffer->skb; + u32 tx_features = first_buffer->tx_features; + u16 ntu = ring->next_to_use; + union sxe_tx_data_desc *desc = SXE_TX_DESC(ring, ntu); + struct sxe_adapter *adapter = netdev_priv(ring->netdev); + + sxe_tx_desc_offload_setup(tx_features, skb->len - hdr_len, desc); + + ret = sxe_tx_desc_setup(ring, skb, first_buffer, &desc, &ntu); + if (ret) { + goto l_end; + } + + queue = netdev_get_tx_queue(ring->netdev, ring->idx); + netdev_tx_sent_queue(queue, first_buffer->bytecount); + + first_buffer->time_stamp = jiffies; + skb_tx_timestamp(skb); + + wmb(); + + first_buffer->next_to_watch = desc; + + ntu++; + if (ntu == ring->depth) { + ntu = 0; + } + ring->next_to_use = ntu; + + if (unlikely(sxe_desc_unused_count(ring) < SXE_TX_DESC_NEEDED)) { + ret = sxe_maybe_stop_tx(ring, SXE_TX_DESC_NEEDED); + if (ret < 0) { + LOG_WARN_BDF("the desc is not enough in the ring[%u]," + "to stop the ring, " + "desc_cnt < SXE_TX_DESC_NEEDED[%u]\n", + ring->idx, (u32)SXE_TX_DESC_NEEDED); + } + } + + if (netif_xmit_stopped(queue) || +#ifdef NETDEV_XMIT_MORE_WORK_AROUND + !netdev_xmit_more_workaround() +#else + !netdev_xmit_more() +#endif + ) { + writel(ntu, ring->desc.tail); + LOG_DEBUG_BDF("send directly, ring[%u]\n", ring->idx); + } + + LOG_DEBUG_BDF("tx end: ring idx=%u, ring reg=%u, next_to_use=%d, " + "next_to_clean=%d, 
next_to_watch=%p\n", + ring->idx, ring->reg_idx, ring->next_to_use, + ring->next_to_clean, first_buffer->next_to_watch); + + return 0; + +l_end: + return ret; +} + +static inline struct sxe_tx_buffer *sxe_tx_first_buffer_get( + struct sk_buff *skb, + struct sxe_ring *ring) +{ + struct sxe_tx_buffer *first_buff; + + first_buff = &ring->tx_buffer_info[ring->next_to_use]; + first_buff->skb = skb; + first_buff->bytecount = skb->len; + first_buff->gso_segs = 1; + first_buff->protocol = skb->protocol; + first_buff->tx_features = 0; + + return first_buff; +} + +STATIC s32 sxe_tx_vlan_process(struct sk_buff *skb, + struct sxe_tx_buffer *first_buffer, + struct sxe_tx_context_desc *ctxt_desc) +{ + s32 ret = 0; + __be16 protocol = skb->protocol; + u32 vlan_tag = 0; + + if (skb_vlan_tag_present(skb)) { + vlan_tag = skb_vlan_tag_get(skb); + first_buffer->tx_features |= SXE_TX_FEATURE_HW_VLAN; + + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) { + ret = -SXE_ERR_PARAM; + goto l_ret; + } + + vlan_tag = ntohs(vhdr->h_vlan_TCI); + first_buffer->tx_features |= SXE_TX_FEATURE_SW_VLAN; + } + protocol = vlan_get_protocol(skb); + + first_buffer->protocol = protocol; + + first_buffer->tx_features |= vlan_tag << SXE_TX_FEATURE_VLAN_SHIFT; + + sxe_ctxt_desc_vlan_tag_set(ctxt_desc, vlan_tag); + sxe_ctxt_desc_maclen_set(ctxt_desc, skb_network_offset(skb)); + +l_ret: + return ret; +} + +#if 0 + +static u8 *sxe_ptp_header_parse(struct sk_buff *skb, unsigned int type) +{ + u8 *ptp_hdr = NULL; + u8 *data = skb_mac_header(skb); + u32 offset = 0; + + if (type & PTP_CLASS_VLAN) { + offset += VLAN_HLEN; + } + + switch (type & PTP_CLASS_PMASK) { + case PTP_CLASS_IPV4: + offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN; + break; + case PTP_CLASS_IPV6: + offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; + break; + case PTP_CLASS_L2: + offset += ETH_HLEN; + break; + default: + goto l_ret; + } + + if (skb->len + ETH_HLEN < offset + 34) { + goto l_ret; + } + + ptp_hdr = data + offset; +l_ret: + return ptp_hdr; +} + +static void sxe_tx_ptp_one_step_mode_check(struct sxe_adapter *adapter, + struct sk_buff *skb) +{ + u32 ptp_type; + u8 *ptp_hdr; + u8 ptp_msgtype; + u16 ptp_flagfield; + u32 onestep; + + ptp_type = ptp_classify_raw(skb); + if (ptp_type != PTP_CLASS_NONE) { + ptp_hdr = sxe_ptp_header_parse(skb, ptp_type); + if (ptp_hdr) { + ptp_flagfield = *(__be16 *)(ptp_hdr + + SXE_PTP_FLAGFIELD_OFFSET); + + onestep = !!!(ptp_flagfield & SXE_PTP_FLAGFIELD_TWOSTEP); + if (onestep) { + ptp_msgtype = *(u8 *)ptp_hdr; + if (ptp_msgtype == SXE_PTP_MSG_TYPE_SYNC) { + adapter->cap |= SXE_1588V2_ONE_STEP; + } + } + } + } + + return; +} +#endif + +static void sxe_tx_ptp_process(struct sxe_adapter *adapter, + struct sk_buff *skb, + struct sxe_tx_buffer *first_buffer) +{ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->ptp_ctxt.ptp_clock) { + LOG_DEBUG_BDF("need ptp stamp and ptp_clock=0x%p\n", + adapter->ptp_ctxt.ptp_clock); + if ((adapter->ptp_ctxt.tstamp_config.tx_type == HWTSTAMP_TX_ON) && + (!test_and_set_bit_lock(SXE_PTP_TX_IN_PROGRESS, + &adapter->state))) { + LOG_DEBUG_BDF("there is no other tx in ptp mode\n"); + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + first_buffer->tx_features |= SXE_TX_FEATURE_TSTAMP; + + adapter->ptp_ctxt.ptp_tx_skb = skb_get(skb); + adapter->ptp_ctxt.ptp_tx_start = jiffies; + schedule_work(&adapter->ptp_ctxt.ptp_tx_work); + } else { + adapter->stats.sw.tx_hwtstamp_skipped++; 
+ } + } + + return; +} + +#ifdef SXE_DCB_CONFIGURE +STATIC s32 sxe_tx_dcb_process(struct sxe_adapter *adatper, + struct sk_buff *skb, + struct sxe_tx_buffer *first_buffer, + struct sxe_tx_context_desc *ctxt_desc) +{ + s32 ret = 0; + u32 vlan_tag; + struct vlan_ethhdr *vhdr; + + if (!(adatper->cap & SXE_DCB_ENABLE)) { + goto l_end; + } + + if ((first_buffer->tx_features & + (SXE_TX_FEATURE_HW_VLAN | SXE_TX_FEATURE_SW_VLAN)) || + (skb->priority != TC_PRIO_CONTROL)) { + + first_buffer->tx_features &= ~SXE_TX_FEATURE_VLAN_PRIO_MASK; + first_buffer->tx_features |= (skb->priority & 0x7) << + SXE_TX_FEATURE_VLAN_PRIO_SHIFT; + vlan_tag = first_buffer->tx_features >> \ + SXE_TX_FEATURE_VLAN_SHIFT; + + if (first_buffer->tx_features & SXE_TX_FEATURE_SW_VLAN) { + if (skb_cow_head(skb, 0)) { + LOG_ERROR("skb head reallocation failed\n"); + ret = -ENOMEM; + goto l_end; + } + + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI = htons(vlan_tag); + } else { + first_buffer->tx_features |= SXE_TX_FEATURE_HW_VLAN; + sxe_ctxt_desc_vlan_tag_set(ctxt_desc, vlan_tag); + sxe_ctxt_desc_maclen_set(ctxt_desc, skb_network_offset(skb)); + } + + LOG_DEBUG("dcb enable needs to handle vlan tag, " + "vlan_tag=%x, priority=%d, tx_features=%x\n", + vlan_tag, skb->priority, first_buffer->tx_features); + } + +l_end: + return ret; +} +#endif + +STATIC s32 sxe_tx_feature_offload(struct sxe_adapter *adapter, + struct sxe_ring *ring, + struct sk_buff *skb, + struct sxe_tx_buffer *first_buffer, + u8 *hdr_len) +{ + s32 ret; + s32 need_tso; + struct sxe_tx_context_desc ctxt_desc = {0}; + + + ret = sxe_tx_vlan_process(skb, first_buffer, &ctxt_desc); + if (ret < 0) { + LOG_ERROR_BDF("ring[%u] vlan process failed\n", ring->idx); + goto l_end; + } + +#ifdef SXE_DCB_CONFIGURE + ret = sxe_tx_dcb_process(adapter, skb, first_buffer, &ctxt_desc); + if (ret) { + LOG_ERROR_BDF("ring[%u] dcb process failed\n", ring->idx); + goto l_end; + } +#endif + +#ifdef SXE_IPSEC_CONFIGURE + ret = sxe_tx_ipsec_offload(ring, first_buffer, &ctxt_desc); + if (ret) { + LOG_ERROR_BDF("ring[%u] tx ipsec offload failed.(err:%d)\n", + ring->idx, ret); + goto l_end; + } +#endif + + need_tso = sxe_tso(ring, first_buffer, &ctxt_desc, hdr_len); + if (need_tso < 0) { + LOG_ERROR_BDF("tso deal failed, ring->idx=%u\n", ring->idx); + ret = need_tso; + goto l_end; + } else if (!need_tso) { + sxe_tx_csum_offload(ring, first_buffer, &ctxt_desc); + } + + if (first_buffer->tx_features & + (SXE_TX_FEATURE_HW_VLAN | + SXE_TX_FEATURE_CC | + SXE_TX_FEATURE_CSUM | + SXE_TX_FEATURE_IPSEC | + SXE_TX_FEATURE_TSO)) { + + sxe_tx_ctxt_desc_set(ring, &ctxt_desc); + } + +l_end: + return ret; +} + +netdev_tx_t sxe_ring_xmit(struct sk_buff *skb, + struct net_device *netdev, + struct sxe_ring *ring) +{ + s32 res; + u8 hdr_len = 0; + netdev_tx_t ret = NETDEV_TX_OK; + struct sxe_tx_buffer *first_buffer; + struct sxe_adapter *adapter = netdev_priv(netdev); + + ret = sxe_ring_maybe_stop_tx(skb, ring); + if (ret != NETDEV_TX_OK) { + LOG_ERROR_BDF("tx busy, ring idx=%u\n", ring->idx); + goto l_end; + } + + first_buffer = sxe_tx_first_buffer_get(skb, ring); + + res = sxe_tx_feature_offload(adapter, ring, skb, first_buffer, &hdr_len); + if (res < 0) { + LOG_ERROR_BDF("tx offload failed, ring->idx=%u\n", ring->idx); + goto l_free; + } + + sxe_tx_ptp_process(adapter, skb, first_buffer); + + if (test_bit(SXE_TX_FNAV_INIT_DONE, &ring->state)) { + sxe_fnav_sample_rule_get(ring, first_buffer); + } + + if (sxe_xmit_pkt(ring, first_buffer, hdr_len)) { + LOG_ERROR_BDF("tx dma mapping err, ring 
idx=%u\n", ring->idx); + goto l_cleanup_tx_timestamp; + } + return NETDEV_TX_OK; + +l_free: + dev_kfree_skb_any(first_buffer->skb); + first_buffer->skb = NULL; +l_cleanup_tx_timestamp: + if (unlikely(first_buffer->tx_features & SXE_TX_FEATURE_TSTAMP)) { + dev_kfree_skb_any(adapter->ptp_ctxt.ptp_tx_skb); + adapter->ptp_ctxt.ptp_tx_skb = NULL; + cancel_work_sync(&adapter->ptp_ctxt.ptp_tx_work); + clear_bit_unlock(SXE_PTP_TX_IN_PROGRESS, &adapter->state); + } +l_end: + return ret; +} + +netdev_tx_t sxe_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + netdev_tx_t ret; + struct sxe_adapter *adapter = netdev_priv(netdev); + struct sxe_ring *tx_ring; + + if (skb_put_padto(skb, SXE_SKB_MIN_LEN)) { + ret = NETDEV_TX_OK; + goto l_end; + } + + tx_ring = adapter->tx_ring_ctxt.ring[skb_get_queue_mapping(skb)]; + LOG_DEBUG_BDF("sxe xmit start, ring idx=%u, ring_reg=%u\n", + tx_ring->idx, tx_ring->reg_idx); + +#ifdef SXE_DRIVER_TRACE + SXE_TRACE_TX(tx_ring->idx, SXE_TRACE_LAB_TX_START); +#endif + + SKB_DUMP(skb); + + ret = sxe_ring_xmit(skb, netdev, tx_ring); + if (ret) { + LOG_ERROR_BDF("sxe xmit failed, ring idx=%u, status=%x\n", + tx_ring->idx, ret); + } else { + LOG_DEBUG_BDF("sxe xmit end, ring idx=%u\n", tx_ring->idx); + } + +#ifdef SXE_DRIVER_TRACE + if(!ret) { + SXE_TRACE_TX(tx_ring->idx, SXE_TRACE_LAB_TX_END); + } +#endif + +l_end: + return ret; +} + +static inline void sxe_tx_skb_unmap(struct sxe_ring *ring, + struct sxe_tx_buffer *tx_buffer) +{ + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + return; +} + +static inline void sxe_tx_desc_buf_update( + struct sxe_ring *ring, + struct sxe_tx_buffer **tx_buffer, + union sxe_tx_data_desc **tx_desc, + u32 *next_to_clean) +{ + (*tx_buffer)++; + (*tx_desc)++; + ++(*next_to_clean); + if (unlikely(!(*next_to_clean))) { + *next_to_clean -= ring->depth; + *tx_buffer = ring->tx_buffer_info; + *tx_desc = SXE_TX_DESC(ring, 0); + } + + return; +} + +static void sxe_tx_ring_unmap(struct sxe_ring *ring, + s32 napi_budget, + u16 *budget, + struct sxe_ring_stats *ring_stats) +{ + union sxe_tx_data_desc *tx_desc; + union sxe_tx_data_desc *eop_desc; + u32 next_to_clean = ring->next_to_clean; + struct sxe_tx_buffer *tx_buffer; + struct sxe_adapter *adapter = netdev_priv(ring->netdev); + + tx_buffer = &ring->tx_buffer_info[next_to_clean]; + tx_desc = SXE_TX_DESC(ring, next_to_clean); + next_to_clean -= ring->depth; + + LOG_DEBUG_BDF("tx ring clean start: ring idx=%u, reg_idx=%u, next_to_use=%d, " + "next_to_clean=%d, budget=%d, next_to_watch=%p, " + "desc.wb.nxtseq_seed=%#08x, desc.wb.status=%#08x\n", + ring->idx, ring->idx, ring->next_to_use, ring->next_to_clean, + *budget, tx_buffer->next_to_watch, tx_desc->wb.nxtseq_seed, + tx_desc->wb.status); + + do { + eop_desc = tx_buffer->next_to_watch; + if (!eop_desc) { + break; + } + + smp_rmb(); + + if (!(eop_desc->wb.status & cpu_to_le32(SXE_TX_DESC_STAT_DD))) { + break; + } + + tx_buffer->next_to_watch = NULL; + + ring_stats->bytes += tx_buffer->bytecount; + ring_stats->packets += tx_buffer->gso_segs; +#ifdef SXE_IPSEC_CONFIGURE + struct sxe_adapter *adapter = netdev_priv(ring->netdev); + if (tx_buffer->tx_features & SXE_TX_FEATURE_IPSEC) { + adapter->stats.sw.tx_ipsec++; + } +#endif + +#ifdef HAVE_XDP_SUPPORT + if (ring_is_xdp(ring)) { + xdp_return_frame(tx_buffer->xdpf); + } else { + napi_consume_skb(tx_buffer->skb, napi_budget); + } +#else + napi_consume_skb(tx_buffer->skb, napi_budget); +#endif + 
LOG_DEBUG_BDF("tx ring clean: budget=%d, bytes=%llu, packet=%llu\n", + *budget, ring_stats->bytes, ring_stats->packets); + + sxe_tx_skb_unmap(ring, tx_buffer); + while (tx_desc != eop_desc) { + sxe_tx_desc_buf_update(ring, &tx_buffer, + &tx_desc, &next_to_clean); + + if (dma_unmap_len(tx_buffer, len)) { + sxe_tx_skb_unmap(ring, tx_buffer); + } + } + sxe_tx_desc_buf_update(ring, &tx_buffer, + &tx_desc, &next_to_clean); + + prefetch(tx_desc); + + --*budget; + }while (likely(*budget)); + + next_to_clean += ring->depth; + ring->next_to_clean = next_to_clean; + LOG_DEBUG_BDF("tx ring clean end: ring idx=%u, reg_idx=%u, next_to_use=%d, " + "next_to_clean=%d, budget=%d\n", + ring->idx, ring->reg_idx, ring->next_to_use, + ring->next_to_clean, *budget); + + return; +} + +bool sxe_tx_ring_pending(struct sxe_adapter *adapter) +{ + u32 i; + bool ret = false; + struct sxe_ring **tx_ring = adapter->tx_ring_ctxt.ring; + struct sxe_ring **xdp_ring = adapter->xdp_ring_ctxt.ring; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + if (tx_ring[i]->next_to_use != tx_ring[i]->next_to_clean) { + ret = true; + LOG_DEV_DEBUG("tx ring %d, next_to_use %d, next_to_clean %d, pending.\n", + i, tx_ring[i]->next_to_use, tx_ring[i]->next_to_clean); + goto l_end; + } + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + if (xdp_ring[i]->next_to_use != xdp_ring[i]->next_to_clean) { + ret = true; + LOG_DEV_DEBUG("tx ring %d, next_to_use %d, next_to_clean %d, pending.\n", + i, xdp_ring[i]->next_to_use, xdp_ring[i]->next_to_clean); + goto l_end; + } + } + +l_end: + return ret; +} + +static u64 sxe_tx_ring_pending_get(struct sxe_ring *ring) +{ + u32 head, tail; + + head = ring->next_to_clean; + tail = ring->next_to_use; + + return ((head <= tail) ? tail : tail + ring->depth) - head; +} + +static inline bool sxe_detect_tx_hang(struct sxe_ring *ring) +{ + bool ret; + u32 tx_done = ring->stats.packets; + u32 tx_done_old = ring->tx_stats.tx_done_old; + u32 tx_pending = sxe_tx_ring_pending_get(ring); + + SXE_TX_HANG_CHECK_COMPLETE(ring); + + if (tx_done_old == tx_done && tx_pending) { + ret = test_and_set_bit(SXE_HANG_CHECK_ARMED, &ring->state); + goto l_end; + } + + ring->tx_stats.tx_done_old = tx_done; + + clear_bit(SXE_HANG_CHECK_ARMED, &ring->state); + + ret = false; +l_end: + return ret; +} + +static void sxe_tx_timeout_reset(struct sxe_adapter *adapter) +{ + if (!test_bit(SXE_DOWN, &adapter->state)) { + set_bit(SXE_RESET_REQUESTED, &adapter->monitor_ctxt.state); + LOG_MSG_WARN(drv, "initiating reset due to tx timeout\n"); + sxe_monitor_work_schedule(adapter); + } + + return; +} + +static bool sxe_blocked_tx_ring_find(struct sxe_adapter *adapter) +{ + u32 i; + struct net_device *netdev = adapter->netdev; + struct netdev_queue *q; + unsigned long trans_start; + + for (i = 0; i < netdev->num_tx_queues; i++) { + q = netdev_get_tx_queue(netdev, i); + trans_start = q->trans_start; + if (netif_xmit_stopped(q) && \ + time_after(jiffies, + (trans_start + netdev->watchdog_timeo))) { + LOG_WARN_BDF("tx_timeout: netdev hang queue %d found\n", i); + return true; + } + } + + LOG_INFO_BDF("tx_timeout: no netdev hang queue found\n"); + return false; +} + +#ifdef HAVE_TIMEOUT_TXQUEUE_IDX +void sxe_tx_timeout(struct net_device *netdev, u32 __always_unused txqueue) +#else +void sxe_tx_timeout(struct net_device *netdev) +#endif +{ + struct sxe_adapter *adapter = netdev_priv(netdev); + + if (!sxe_blocked_tx_ring_find(adapter)) { + goto l_end; + } + + sxe_tx_timeout_reset(adapter); + +l_end: + return; +} + +static inline bool 
sxe_tx_hang_handle(struct sxe_adapter *adapter, + struct sxe_ring *ring) +{ + u32 tdh, tdt; + bool ret = false; + struct sxe_hw *hw = &adapter->hw; + + if (SXE_DETECT_TX_HANG_NEED(ring) && sxe_detect_tx_hang(ring)) { +#ifdef HAVE_XDP_SUPPORT + if (!ring_is_xdp(ring)) +#endif + netif_stop_subqueue(ring->netdev, ring->idx); + + hw->dma.ops->tx_ring_info_get(hw, ring->reg_idx, &tdh, &tdt); + LOG_MSG_ERR(drv, "detected Tx hang %s\n" + " Tx ring <%u>\n" + " ring reg <%u>\n" + " TDH, TDT <%u>, <%u>\n" + " next_to_use <%u>\n" + " next_to_clean <%u>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%llx>\n", + ring_is_xdp(ring) ? "(XDP)" : "", + ring->idx, ring->reg_idx, + tdh, tdt, + ring->next_to_use, ring->next_to_clean, + ring->tx_buffer_info[ring->next_to_clean].time_stamp, + (U64)jiffies); + LOG_MSG_INFO(probe, "tx hang %llu detected on queue %d, resetting adapter\n", + adapter->stats.sw.reset_work_trigger_cnt + 1, + ring->idx); + + sxe_tx_timeout_reset(adapter); + + ret = true; + } + + return ret; +} + +static inline void sxe_tx_pkt_stats_update(struct sxe_irq_rate *irq_rate, + struct sxe_ring *ring, + struct sxe_ring_stats *stats) +{ + u64_stats_update_begin(&ring->syncp); + ring->stats.bytes += stats->bytes; + ring->stats.packets += stats->packets; + u64_stats_update_end(&ring->syncp); + irq_rate->total_bytes += stats->bytes; + irq_rate->total_packets += stats->packets; + +#ifdef SXE_IPSEC_CONFIGURE + struct sxe_adapter *adapter = netdev_priv(ring->netdev); + adapter->ipsec.rx_ipsec += adapter->stats.sw.rx_ipsec; +#endif + + return; +} + +bool sxe_tx_ring_irq_clean(struct sxe_irq_data *irq, + struct sxe_ring *ring, s32 napi_budget) +{ + bool ret; + struct sxe_adapter *adapter = irq->adapter; + struct netdev_queue *queue; + struct sxe_ring_stats ring_stats = {}; + u16 budget = irq->tx.work_limit; + + if (test_bit(SXE_DOWN, &adapter->state)) { + ret = true; + goto l_end; + } + + sxe_tx_ring_unmap(ring, napi_budget, &budget, &ring_stats); + + sxe_tx_pkt_stats_update(&irq->tx.irq_rate, ring, &ring_stats); + + if (sxe_tx_hang_handle(adapter, ring)) { + ret = true; + goto l_end; + } + +#ifdef HAVE_XDP_SUPPORT + if (ring_is_xdp(ring)) { + ret = !!budget; + goto l_end; + } +#endif + queue = netdev_get_tx_queue(ring->netdev, ring->idx); + netdev_tx_completed_queue(queue, ring_stats.packets, ring_stats.bytes); + + if (unlikely(ring_stats.packets && + netif_carrier_ok(ring->netdev) && + (sxe_desc_unused_count(ring) >= \ + SXE_TX_WAKE_THRESHOLD))) { + smp_mb(); + + if (__netif_subqueue_stopped(ring->netdev, ring->idx) + && !test_bit(SXE_DOWN, &adapter->state)) { + netif_wake_subqueue(ring->netdev, ring->idx); + ++ring->tx_stats.restart_queue; + LOG_WARN_BDF("\n\n ring idx=%u, wake_up\n\n", ring->idx); + } + } + + ret = !!budget; + +l_end: + return ret; +} + +void sxe_hw_tx_disable(struct sxe_adapter *adapter) +{ + u8 reg_idx; + bool link_up; + u32 i, wait_loop, txdctl, link_speed; + struct sxe_ring *ring; + unsigned long wait_delay, delay_interval; + struct sxe_hw *hw = &adapter->hw; + + LOG_DEBUG_BDF("tx hw disable\n"); + + if (sxe_is_hw_fault(hw)) { + goto l_end; + } + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + ring = adapter->tx_ring_ctxt.ring[i]; + reg_idx = ring->reg_idx; + + hw->dma.ops->tx_ring_switch_not_polling(hw, reg_idx, false); + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + ring = adapter->xdp_ring_ctxt.ring[i]; + reg_idx = ring->reg_idx; + + hw->dma.ops->tx_ring_switch_not_polling(hw, reg_idx, false); + } + + sxe_link_info_get(adapter, 
&link_speed, &link_up); + if (!link_up) { + goto l_end; + } + + delay_interval = sxe_pcie_timeout_poll(adapter->pdev, hw); + wait_delay = delay_interval; + + wait_loop = SXE_MAX_TXRX_DESC_POLL; + while (wait_loop--) { + usleep_range(wait_delay, wait_delay + 10); + wait_delay += delay_interval * 2; + txdctl = 0; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + ring = adapter->tx_ring_ctxt.ring[i]; + reg_idx = ring->reg_idx; + + txdctl |= hw->dma.ops->tx_desc_ctrl_get(hw, reg_idx); + } + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + ring = adapter->xdp_ring_ctxt.ring[i]; + reg_idx = ring->reg_idx; + + txdctl |= hw->dma.ops->tx_desc_ctrl_get(hw, reg_idx); + } + + if (!(txdctl & SXE_TXDCTL_ENABLE)) { + goto l_end; + } + } + + LOG_MSG_ERR(drv, "TXDCTL.ENABLE for one or more queues not cleared within the polling period\n"); + +l_end: + return; +} + +void sxe_tx_buffer_dump(struct sxe_adapter *adapter) +{ + u32 i; + struct sxe_ring *ring; + struct sxe_tx_buffer *tx_buffer; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + ring = adapter->tx_ring_ctxt.ring[i]; + tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; + + LOG_DEV_INFO(" %5d %5X %5X %016llX %08X %p %016llX\n", + i, ring->next_to_use, ring->next_to_clean, + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp); + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + ring = adapter->xdp_ring_ctxt.ring[i]; + tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; + + LOG_DEV_INFO(" %5d %5X %5X %016llX %08X %p %016llX\n", + i, ring->next_to_use, ring->next_to_clean, + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp); + } + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_tx_proc.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_tx_proc.h new file mode 100644 index 000000000000..cba42b1e4515 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_tx_proc.h @@ -0,0 +1,153 @@ +#ifndef __SXE_TX_PROC_H__ +#define __SXE_TX_PROC_H__ + +#include "sxe.h" +#include "sxe_ring.h" + +#define SXE_IPV4 (4) +#define SXE_IPV6 (6) +#define SXE_ALIGN_4K (4096) + +#define SXE_TX_FEATURE_VLAN_PRIO_MASK 0xe0000000 +#define SXE_TX_FEATURE_VLAN_PRIO_SHIFT 29 +#define SXE_TX_FEATURE_VLAN_SHIFT 16 + +enum sxe_tx_features { + SXE_TX_FEATURE_HW_VLAN = 0x01, + SXE_TX_FEATURE_TSO = 0x02, + SXE_TX_FEATURE_TSTAMP = 0x04, + + SXE_TX_FEATURE_CC = 0x08, + SXE_TX_FEATURE_IPV4 = 0x10, + SXE_TX_FEATURE_CSUM = 0x20, + SXE_TX_FEATURE_IPSEC = 0x40, + SXE_TX_FEATURE_SW_VLAN = 0x80, +}; + +#define SXE_TX_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? 
\ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +union sxe_ip_hdr { + struct iphdr *v4; + struct ipv6hdr *v6; + u8 *hdr; +}; +union sxe_l4_hdr { + struct tcphdr *tcp; + u8 *hdr; +}; + +union app_tr_data_hdr { + u8 *network; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; +}; + +int sxe_tx_configure(struct sxe_adapter *adapter); + +void sxe_tx_ring_buffer_clean(struct sxe_ring *ring); + +bool sxe_tx_ring_irq_clean(struct sxe_irq_data *irq, + struct sxe_ring *ring, s32 napi_budget); + +netdev_tx_t sxe_xmit(struct sk_buff *skb, struct net_device *netdev); + +s32 sxe_tx_ring_alloc(struct sxe_ring *ring); + +void sxe_tx_ring_free(struct sxe_ring *ring); + +void sxe_tx_resources_free(struct sxe_adapter *adapter); + +s32 sxe_tx_ring_depth_reset(struct sxe_adapter *adapter, u32 tx_cnt); + +void sxe_hw_tx_disable(struct sxe_adapter *adapter); + +void sxe_hw_tx_configure(struct sxe_adapter *adapter); + +#ifdef HAVE_TIMEOUT_TXQUEUE_IDX +void sxe_tx_timeout(struct net_device *netdev, u32 __always_unused txqueue); +#else +void sxe_tx_timeout(struct net_device *netdev); +#endif + +bool sxe_tx_ring_pending(struct sxe_adapter *adapter); + +void sxe_tx_buffer_dump(struct sxe_adapter *adapter); + +void sxe_tx_ring_attr_configure(struct sxe_adapter *adapter, + struct sxe_ring *ring); + +void sxe_tx_ring_reg_configure(struct sxe_adapter *adapter, + struct sxe_ring *ring); + +netdev_tx_t sxe_ring_xmit(struct sk_buff *skb, + struct net_device *netdev, + struct sxe_ring *ring); + +s32 sxe_test_tx_configure(struct sxe_adapter *adapter, struct sxe_ring *ring); + +static inline void sxe_tx_buffer_init(struct sxe_ring *ring) +{ + memset(ring->tx_buffer_info, 0, + sizeof(struct sxe_tx_buffer) * ring->depth); + return; +} + +static inline void sxe_ctxt_desc_iplen_set( + struct sxe_tx_context_desc *ctxt_desc, u32 iplen) +{ + ctxt_desc->vlan_macip_lens |= iplen; + return; +} + +static inline void sxe_ctxt_desc_maclen_set( + struct sxe_tx_context_desc *ctxt_desc, u32 maclen) +{ + ctxt_desc->vlan_macip_lens &= ~SXE_TX_CTXTD_MACLEN_MASK; + ctxt_desc->vlan_macip_lens |= maclen << SXE_TX_CTXTD_MACLEN_SHIFT; + return; +} + +static inline void sxe_ctxt_desc_vlan_tag_set( + struct sxe_tx_context_desc *ctxt_desc, u32 vlan_tag) +{ + ctxt_desc->vlan_macip_lens |= vlan_tag << SXE_TX_CTXTD_VLAN_SHIFT; + return; +} + +static inline void sxe_ctxt_desc_tucmd_set( + struct sxe_tx_context_desc *ctxt_desc, u32 tucmd) +{ + ctxt_desc->type_tucmd_mlhl |= tucmd; + return; +} + +static inline void sxe_ctxt_desc_sa_idx_set( + struct sxe_tx_context_desc *ctxt_desc, u32 sa_idx) +{ + ctxt_desc->sa_idx = sa_idx; + return; +} + +static inline void sxe_ctxt_desc_mss_l4len_set( + struct sxe_tx_context_desc *ctxt_desc, u32 mss_l4len) +{ + ctxt_desc->mss_l4len_idx = mss_l4len; + return; +} + +static inline __be16 sxe_ctxt_desc_vlan_tag_get( + struct sxe_tx_context_desc *ctxt_desc) +{ + return (ctxt_desc->vlan_macip_lens >> SXE_TX_CTXTD_VLAN_SHIFT); +} + +static inline void sxe_tx_release(struct sxe_adapter *adapter) +{ + sxe_tx_resources_free(adapter); + + return; +} +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_xdp.c b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_xdp.c new file mode 100644 index 000000000000..30a0c1f36fd9 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_xdp.c @@ -0,0 +1,1499 @@ +#include "sxe_xdp.h" +#include "sxe_compat.h" +#ifdef HAVE_XDP_SUPPORT +#include "sxe_rx_proc.h" +#include "sxe_pci.h" +#include "sxe_netdev.h" +#include "sxe_tx_proc.h" + 
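/*
 * Standalone userspace sketch, not part of this patch: both the regular
 * transmit path (sxe_desc_unused_count() in sxe_tx_proc.c above) and the
 * XDP transmit path below (through sxe_desc_unused(), presumably the same
 * formula) keep one descriptor slot permanently unused so that
 * next_to_use == next_to_clean can only mean "ring empty". This sketch
 * reproduces only that occupancy arithmetic.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int desc_unused(unsigned int depth, unsigned int ntc,
				unsigned int ntu)
{
	/* If the clean index is ahead, the use index has already wrapped;
	 * otherwise add a full ring depth before subtracting. */
	return ((ntc > ntu) ? 0 : depth) + ntc - ntu - 1;
}

int main(void)
{
	/* Empty 512-entry ring: 511 slots usable, one reserved. */
	assert(desc_unused(512, 0, 0) == 511);
	/* Ten descriptors posted and not yet cleaned. */
	assert(desc_unused(512, 0, 10) == 501);
	/* Wrapped: next_to_use restarted at 5, next_to_clean still at 500. */
	assert(desc_unused(512, 500, 5) == 494);
	printf("ring occupancy checks passed\n");
	return 0;
}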
+DEFINE_STATIC_KEY_FALSE(sxe_xdp_tx_lock_key); + +u32 sxe_max_xdp_frame_size(struct sxe_adapter *adapter) +{ + u32 size; + + if (PAGE_SIZE >= SXE_PAGE_SIZE_8KB || adapter->cap & SXE_RX_LEGACY) + size = SXE_RXBUFFER_2K; + else + size = SXE_RXBUFFER_3K; + + return size; +} + +static s32 sxe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) +{ + s32 i; + u32 frame_size = dev->mtu + SXE_ETH_DEAD_LOAD; + struct sxe_adapter *adapter = netdev_priv(dev); + struct bpf_prog *old_prog; + bool need_reset; + s32 ret = 0; + + if (adapter->cap & (SXE_SRIOV_ENABLE | SXE_DCB_ENABLE)) { + LOG_ERROR_BDF("sr_iov is not compatible with xdp or dcb\n"); + ret = -EINVAL; + goto l_ret; + } + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + struct sxe_ring *ring = adapter->rx_ring_ctxt.ring[i]; + + if (ring_is_lro_enabled(ring)) { + LOG_ERROR_BDF("rsc is not compatible with xdp\n"); + ret = -EINVAL; + goto l_ret; + } + + if (frame_size > sxe_rx_bufsz(ring)) { + LOG_ERROR_BDF("xdp frame_size = %u > rx buf size=%u\n", + frame_size, sxe_rx_bufsz(ring)); + ret = -EINVAL; + goto l_ret; + } + } + + if (nr_cpu_ids > SXE_XDP_RING_NUM_MAX * 2) { + LOG_ERROR_BDF("nr_cpu_ids=%u > max xdp ring=%u\n", + nr_cpu_ids, SXE_XDP_RING_NUM_MAX); + ret = -ENOMEM; + goto l_ret; + } else if (nr_cpu_ids > SXE_XDP_RING_NUM_MAX) { + static_branch_inc(&sxe_xdp_tx_lock_key); + } + + old_prog = xchg(&adapter->xdp_prog, prog); + need_reset = (!!prog != !!old_prog); + + LOG_DEBUG_BDF("xdp setup need reset:%s\n", need_reset ? "yes" : "no"); + if (need_reset) { + ret = sxe_ring_reassign(adapter, sxe_dcb_tc_get(adapter)); + if (ret) { + LOG_ERROR_BDF("reassign ring err, ret=%d\n", ret); + rcu_assign_pointer(adapter->xdp_prog, old_prog); + ret = -EINVAL; + goto l_ret; + } + } else { + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + (void)xchg(&adapter->rx_ring_ctxt.ring[i]->xdp_prog, + adapter->xdp_prog); + } + } + + if (old_prog) { + bpf_prog_put(old_prog); + } + +#ifdef HAVE_AF_XDP_ZERO_COPY + if (need_reset && prog) { + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + if (adapter->xdp_ring_ctxt.ring[i]->xsk_pool) { +#ifdef HAVE_NDO_XSK_WAKEUP + (void)sxe_xsk_wakeup(adapter->netdev, i, + XDP_WAKEUP_RX); +#else + (void)sxe_xsk_async_xmit(adapter->netdev, i); +#endif + } + } + } +#endif +l_ret: + return ret; +} + +#ifndef BPF_WARN_INVALID_XDP_ACTION_API_NEED_3_PARAMS +static inline void +bpf_warn_invalid_xdp_action_compat(__maybe_unused struct net_device *dev, + __maybe_unused struct bpf_prog *prog, u32 act) +{ + bpf_warn_invalid_xdp_action(act); +} + +#define bpf_warn_invalid_xdp_action(dev, prog, act) \ + bpf_warn_invalid_xdp_action_compat(dev, prog, act) + +#endif + +static s32 sxe_xdp_ring_xmit(struct sxe_ring *ring, + struct xdp_frame *xdpf) +{ + struct sxe_tx_buffer *tx_buffer; + union sxe_tx_data_desc *tx_desc; + u32 len, cmd_type; + dma_addr_t dma; + u16 i; + s32 ret = SXE_XDP_TX; + + len = xdpf->len; + + LOG_DEBUG("xdp ring[%u] xmit data, len=%u\n", ring->idx, len); + if (unlikely(!sxe_desc_unused(ring))) { + LOG_ERROR("the unused desc is 0\n"); + ret = SXE_XDP_CONSUMED; + goto l_ret; + } + + dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + LOG_ERROR("dma mapping error\n"); + ret = SXE_XDP_CONSUMED; + goto l_ret; + } + + tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + i = ring->next_to_use; + tx_desc = SXE_TX_DESC(ring, i); + + dma_unmap_len_set(tx_buffer, len, 
len); + dma_unmap_addr_set(tx_buffer, dma, dma); + tx_buffer->xdpf = xdpf; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + cmd_type = SXE_TX_DESC_TYPE_DATA | + SXE_TX_DESC_DEXT | + SXE_TX_DESC_IFCS; + cmd_type |= len | SXE_TX_DESC_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(len << SXE_TX_DESC_PAYLEN_SHIFT); + + smp_wmb(); + + i++; + if (i == ring->depth) { + i = 0; + } + + tx_buffer->next_to_watch = tx_desc; + ring->next_to_use = i; + +l_ret: + return ret; +} + +static inline void sxe_xdp_ring_tail_update(struct sxe_ring *ring) +{ + wmb(); + writel(ring->next_to_use, ring->desc.tail); +} + +void sxe_xdp_ring_tail_update_locked(struct sxe_ring *ring) +{ + if (static_branch_unlikely(&sxe_xdp_tx_lock_key)) + spin_lock(&ring->tx_lock); + sxe_xdp_ring_tail_update(ring); + if (static_branch_unlikely(&sxe_xdp_tx_lock_key)) + spin_unlock(&ring->tx_lock); + +} + +struct sk_buff *sxe_xdp_run(struct sxe_adapter *adapter, + struct sxe_ring *rx_ring, + struct xdp_buff *xdp) +{ + s32 ret, act; + struct bpf_prog *xdp_prog; + struct xdp_frame *xdpf; + struct sxe_ring *ring; + s32 result = SXE_XDP_PASS; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + if (!xdp_prog) { + LOG_INFO_BDF("xdp prog is NULL\n"); + goto xdp_out; + } + + prefetchw(xdp->data_hard_start); + + act = bpf_prog_run_xdp(xdp_prog, xdp); + LOG_DEBUG_BDF("xdp run act=0x%x\n", act); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + result = SXE_XDP_CONSUMED; + break; + } + + ring = sxe_xdp_tx_ring_pick(adapter); + if (static_branch_unlikely(&sxe_xdp_tx_lock_key)) + spin_lock(&ring->tx_lock); + result = sxe_xdp_ring_xmit(ring, xdpf); + if (static_branch_unlikely(&sxe_xdp_tx_lock_key)) + spin_unlock(&ring->tx_lock); + break; + case XDP_REDIRECT: + ret = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); + if (!ret) { + result = SXE_XDP_REDIR; + } else { + result = SXE_XDP_CONSUMED; + } + + break; + default: + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + result = SXE_XDP_CONSUMED; + break; + } + +xdp_out: + rcu_read_unlock(); + return ERR_PTR(-result); +} + +int sxe_xdp_xmit(struct net_device *dev, int budget, + struct xdp_frame **frames, u32 flags) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + struct sxe_ring *ring; +#ifdef XDP_XMIT_FRAME_FAILED_NEED_FREE + int drops = 0; +#else + int num_xmit = 0; +#endif + u32 i; + int ret; + + if (unlikely(test_bit(SXE_DOWN, &adapter->state))) { + ret = -ENETDOWN; + LOG_ERROR_BDF("adapter state:down\n"); + goto l_ret; + } + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { + ret = -EINVAL; + LOG_ERROR_BDF("xdp flag not set\n"); + goto l_ret; + } + + ring = adapter->xdp_prog ? 
sxe_xdp_tx_ring_pick(adapter) : NULL; + if (unlikely(!ring)) { + ret = -ENXIO; + LOG_ERROR_BDF("xdp ring is not config finish yet\n"); + goto l_ret; + } + + if (unlikely(test_bit(SXE_TX_DISABLED, &ring->state))) { + ret = -ENXIO; + LOG_ERROR_BDF("ring state:disabled\n"); + goto l_ret; + } + + if (static_branch_unlikely(&sxe_xdp_tx_lock_key)) { + spin_lock(&ring->tx_lock); + } + LOG_DEBUG_BDF("start xdp xmit: ring idx=%u, budget=%u", ring->idx, budget); + + for (i = 0; i < budget; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = sxe_xdp_ring_xmit(ring, xdpf); +#ifdef XDP_XMIT_FRAME_FAILED_NEED_FREE + if (err != SXE_XDP_TX) { + LOG_INFO_BDF("xdp ring[%u] xmit drop frame[%u]\n", + ring->idx, i); + xdp_return_frame_rx_napi(xdpf); + drops++; + } +#else + if (err != SXE_XDP_TX) + break; + num_xmit++; +#endif + } + + if (unlikely(flags & XDP_XMIT_FLUSH)) { + sxe_xdp_ring_tail_update(ring); + } + + if (static_branch_unlikely(&sxe_xdp_tx_lock_key)) { + spin_unlock(&ring->tx_lock); + } +#ifdef XDP_XMIT_FRAME_FAILED_NEED_FREE + ret = budget - drops; +#else + ret = num_xmit; +#endif + +l_ret: + return ret; +} + +#ifdef HAVE_AF_XDP_ZERO_COPY +static void sxe_tx_ring_disable(struct sxe_adapter *adapter, + struct sxe_ring *tx_ring) +{ + struct sxe_hw *hw = &adapter->hw; + unsigned long timeout = sxe_get_completion_timeout(adapter); + + set_bit(SXE_TX_DISABLED, &tx_ring->state); + hw->dbu.ops->tx_ring_disable(hw, tx_ring->reg_idx, timeout); + return; +} + +static void sxe_tx_ring_stats_reset(struct sxe_ring *ring) +{ + memset(&ring->stats, 0, sizeof(ring->stats)); + memset(&ring->tx_stats, 0, sizeof(ring->tx_stats)); + return; +} + +static void sxe_rx_ring_stats_reset(struct sxe_ring *ring) +{ + memset(&ring->stats, 0, sizeof(ring->stats)); + memset(&ring->rx_stats, 0, sizeof(ring->rx_stats)); + return; +} + +static void sxe_txrx_ring_disable(struct sxe_adapter *adapter, + u32 ring_idx) +{ + struct sxe_ring *rx_ring, *tx_ring, *xdp_ring; + struct sxe_hw *hw = &adapter->hw; + unsigned long timeout = sxe_get_completion_timeout(adapter); + + rx_ring = adapter->rx_ring_ctxt.ring[ring_idx]; + tx_ring = adapter->tx_ring_ctxt.ring[ring_idx]; + xdp_ring = adapter->xdp_ring_ctxt.ring[ring_idx]; + + sxe_tx_ring_disable(adapter, tx_ring); + if (xdp_ring) { + sxe_tx_ring_disable(adapter, xdp_ring); + } + + hw->dbu.ops->rx_ring_disable(hw, rx_ring->reg_idx, timeout); + + if (xdp_ring) { + synchronize_rcu(); + } + + napi_disable(&rx_ring->irq_data->napi); + + sxe_tx_ring_buffer_clean(tx_ring); + if (xdp_ring) { + sxe_tx_ring_buffer_clean(xdp_ring); + } + + sxe_rx_ring_buffer_clean(rx_ring); + + sxe_tx_ring_stats_reset(tx_ring); + if (xdp_ring) { + sxe_tx_ring_stats_reset(xdp_ring); + } + + sxe_rx_ring_stats_reset(rx_ring); + + return; +} + +void sxe_txrx_ring_enable(struct sxe_adapter *adapter, u32 ring_idx) +{ + struct sxe_ring *rx_ring, *tx_ring, *xdp_ring; + + rx_ring = adapter->rx_ring_ctxt.ring[ring_idx]; + tx_ring = adapter->tx_ring_ctxt.ring[ring_idx]; + xdp_ring = adapter->xdp_ring_ctxt.ring[ring_idx]; + + sxe_tx_ring_attr_configure(adapter, tx_ring); + + if (xdp_ring) { + sxe_tx_ring_attr_configure(adapter, xdp_ring); + } + + sxe_rx_ring_attr_configure(adapter, rx_ring); + + clear_bit(SXE_TX_DISABLED, &tx_ring->state); + if (xdp_ring) { + clear_bit(SXE_TX_DISABLED, &xdp_ring->state); + } + + napi_enable(&rx_ring->irq_data->napi); + + return; +} + +static void sxe_irq_queues_rearm(struct sxe_adapter *adapter, u64 qmask) +{ + struct sxe_hw *hw = &adapter->hw; + + 
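+	/* raise an interrupt on the rings in qmask so their NAPI poll runs again */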
hw->irq.ops->ring_irq_trigger(hw, qmask); + return; +} + +static void sxe_xdp_tx_buffer_clean(struct sxe_ring *tx_ring, + struct sxe_tx_buffer *tx_buffer) +{ + xdp_return_frame(tx_buffer->xdpf); + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); +} + +static bool sxe_zc_xdp_ring_xmit(struct sxe_ring *xdp_ring, u32 budget) +{ + union sxe_tx_data_desc *tx_desc = NULL; + struct sxe_tx_buffer *tx_bi; +#ifndef XSK_UMEM_CONSUME_TX_NEED_3_PARAMS + struct xdp_desc desc; +#endif + dma_addr_t dma; + u32 len; + u32 cmd_type; + bool work_done = true; + + LOG_DEBUG("entry xdp zc ring xmit: ring[%u], budget=%u\n", + xdp_ring->idx, budget); + + while (budget-- > 0) { + if (unlikely(!sxe_desc_unused(xdp_ring))) { + work_done = false; + break; + } + + if (!netif_carrier_ok(xdp_ring->netdev)) { + break; + } + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc)) + break; + + dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr); + len = desc.len; + + xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, len); +#else +#ifndef XSK_UMEM_CONSUME_TX_NEED_3_PARAMS + if (!xsk_umem_consume_tx(xdp_ring->xsk_pool, &desc)) { + break; + } + + dma = xdp_umem_get_dma(xdp_ring->xsk_pool, desc.addr); + len = desc.len; +#else + if (!xsk_umem_consume_tx(xdp_ring->xsk_pool, &dma, &len)) { + break; + } +#endif + dma_sync_single_for_device(xdp_ring->dev, dma, len, + DMA_BIDIRECTIONAL); +#endif + + tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use]; + tx_bi->bytecount = len; + tx_bi->xdpf = NULL; + tx_bi->gso_segs = 1; + + tx_desc = SXE_TX_DESC(xdp_ring, xdp_ring->next_to_use); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + cmd_type = SXE_TX_DESC_TYPE_DATA | + SXE_TX_DESC_DEXT | + SXE_TX_DESC_IFCS; + cmd_type |= len | SXE_TX_DESC_CMD; + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32(len << SXE_TX_DESC_PAYLEN_SHIFT); + + xdp_ring->next_to_use++; + if (xdp_ring->next_to_use == xdp_ring->depth) { + xdp_ring->next_to_use = 0; + } + + } + + if (tx_desc) { + sxe_xdp_ring_tail_update(xdp_ring); + xsk_tx_release(xdp_ring->xsk_pool); + } + + return !!budget && work_done; +} + +bool sxe_xdp_tx_ring_irq_clean(struct sxe_irq_data *irq_data, + struct sxe_ring *xdp_ring, int napi_budget) +{ + u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use; + u32 total_packets = 0, total_bytes = 0; + union sxe_tx_data_desc *tx_desc; + struct sxe_tx_buffer *tx_bi; + u32 xsk_frames = 0; + struct sxe_adapter *adapter = irq_data->adapter; + + LOG_DEBUG_BDF("entry xdp tx irq: ring[%u], ntc=%u, ntu=%u\n", + xdp_ring->idx, ntc, ntu); + tx_bi = &xdp_ring->tx_buffer_info[ntc]; + tx_desc = SXE_TX_DESC(xdp_ring, ntc); + + while (ntc != ntu) { + if (!(tx_desc->wb.status & cpu_to_le32(SXE_TX_DESC_STAT_DD))) { + break; + } + + total_bytes += tx_bi->bytecount; + total_packets += tx_bi->gso_segs; + + if (tx_bi->xdpf) { + sxe_xdp_tx_buffer_clean(xdp_ring, tx_bi); + } else { + xsk_frames++; + } + + tx_bi->xdpf = NULL; + + tx_bi++; + tx_desc++; + ntc++; + if (unlikely(ntc == xdp_ring->depth)) { + ntc = 0; + tx_bi = xdp_ring->tx_buffer_info; + tx_desc = SXE_TX_DESC(xdp_ring, 0); + } + + prefetch(tx_desc); + } + + xdp_ring->next_to_clean = ntc; + + u64_stats_update_begin(&xdp_ring->syncp); + xdp_ring->stats.bytes += total_bytes; + xdp_ring->stats.packets += total_packets; + u64_stats_update_end(&xdp_ring->syncp); + + irq_data->tx.irq_rate.total_bytes += 
total_bytes; + irq_data->tx.irq_rate.total_packets += total_packets; + + if (xsk_frames) { + LOG_INFO_BDF("tx xsk frames=%u\n", xsk_frames); + xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); + } + +#ifdef HAVE_NDO_XSK_WAKEUP + if (xsk_uses_need_wakeup(xdp_ring->xsk_pool)) + xsk_set_tx_need_wakeup(xdp_ring->xsk_pool); +#endif + + return sxe_zc_xdp_ring_xmit(xdp_ring, irq_data->tx.work_limit); +} + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +bool sxe_zc_rx_ring_buffers_alloc(struct sxe_ring *rx_ring, u16 cleaned_count) +#else +static __always_inline bool __sxe_zc_rx_buffers_alloc( + struct sxe_ring *rx_ring, u16 cleaned_count, + bool alloc(struct sxe_ring *rx_ring, + struct sxe_rx_buffer *bi)) +#endif +{ + union sxe_rx_data_desc *rx_desc; + struct sxe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + bool ret = true; +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + dma_addr_t dma; +#endif + if (!cleaned_count) { + LOG_ERROR("the cleaned count is 0\n"); + return true; + } + + rx_desc = SXE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->depth; + + do { +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool); + if (!bi->xdp) { +#else + if (!alloc(rx_ring, bi)) { +#endif + ret = false; + break; + } +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + dma = xsk_buff_xdp_get_dma(bi->xdp); + rx_desc->read.pkt_addr = cpu_to_le64(dma); +#else + + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, + rx_ring->rx_buf_len, + DMA_BIDIRECTIONAL); + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); +#endif + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = SXE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->depth; + } + + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->depth; + + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + rx_ring->next_to_alloc = i; +#endif + wmb(); + writel(i, rx_ring->desc.tail); + } + + return ret; +} + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +static bool sxe_zc_buffer_alloc(struct sxe_ring *rx_ring, + struct sxe_rx_buffer *bi) +{ + struct xdp_umem *umem = rx_ring->xsk_pool; + void *addr = bi->addr; + u64 handle, hr; + + if (addr) + return true; + + if (!xsk_umem_peek_addr(umem, &handle)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(umem, handle); + bi->addr += hr; + + bi->handle = handle + umem->headroom; + + xsk_umem_discard_addr(umem); + return true; +} + +static bool sxe_fast_zc_rx_buffers_alloc(struct sxe_ring *rx_ring, + u16 count) +{ + return __sxe_zc_rx_buffers_alloc(rx_ring, count, + sxe_zc_buffer_alloc); +} + +static bool sxe_slow_zc_rx_buffers_alloc(struct sxe_ring *rx_ring, + struct sxe_rx_buffer *bi) +{ + struct xdp_umem *umem = rx_ring->xsk_pool; + u64 handle, hr; + bool is_alloced; + + if (!xsk_umem_peek_addr_rq(umem, &handle)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + is_alloced = false; + goto l_ret; + } + + handle &= rx_ring->xsk_pool->chunk_mask; + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(umem, handle); + bi->addr += hr; + + bi->handle = handle + umem->headroom; + + xsk_umem_discard_addr_rq(umem); + is_alloced = true; + +l_ret: + return is_alloced; +} + +void sxe_zc_rx_ring_buffers_alloc(struct sxe_ring *rx_ring, + u16 count) +{ + 
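+	/* refill the zero-copy RX ring via the reuse-queue aware (slow) umem allocator */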
__sxe_zc_rx_buffers_alloc(rx_ring, count, + sxe_slow_zc_rx_buffers_alloc); +} + +static struct sxe_rx_buffer *sxe_zc_rx_buffer_get( + struct sxe_ring *rx_ring, u32 size) +{ + struct sxe_rx_buffer *bi; + + bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + bi->dma, 0, + size, + DMA_BIDIRECTIONAL); + + return bi; +} + +static void sxe_zc_rx_buffer_reuse(struct sxe_ring *rx_ring, + struct sxe_rx_buffer *old_buf) +{ + u16 nta = rx_ring->next_to_alloc; + struct sxe_rx_buffer *new_buf; + + new_buf = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc]; + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->depth) ? nta : 0; + + new_buf->dma = old_buf->dma; + new_buf->addr = old_buf->addr; + new_buf->handle = old_buf->handle; + + old_buf->addr = NULL; + old_buf->skb = NULL; +} + +void sxe_zca_free(struct zero_copy_allocator *alloc, + unsigned long handle_addr) +{ + struct sxe_rx_buffer *bi; + struct sxe_ring *rx_ring; + u64 hr, mask; + u16 nta; + + rx_ring = container_of(alloc, struct sxe_ring, zca); + hr = rx_ring->xsk_pool->headroom + XDP_PACKET_HEADROOM; + mask = rx_ring->xsk_pool->chunk_mask; + + nta = rx_ring->next_to_alloc; + bi = rx_ring->rx_buffer_info; + + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->depth) ? nta : 0; + + handle_addr &= mask; + + bi->dma = xdp_umem_get_dma(rx_ring->xsk_pool, handle_addr); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(rx_ring->xsk_pool, handle_addr); + bi->addr += hr; + + bi->handle = (u64)handle_addr + rx_ring->xsk_pool->headroom; + return; +} +#endif + +static void sxe_ntc_update(struct sxe_ring *rx_ring) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->depth) ? ntc : 0; + rx_ring->next_to_clean = ntc; + prefetch(SXE_RX_DESC(rx_ring, ntc)); + return; +} + +static s32 sxe_zc_xdp_run(struct sxe_adapter *adapter, + struct sxe_ring *rx_ring, + struct xdp_buff *xdp) +{ + s32 err, ret = SXE_XDP_PASS; + struct bpf_prog *xdp_prog; + struct xdp_frame *xdpf; + struct sxe_ring *ring; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + act = bpf_prog_run_xdp(xdp_prog, xdp); +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + xdp->handle += xdp->data - xdp->data_hard_start; +#endif + LOG_DEBUG_BDF("zc xdp run act=0x%x\n",act); + + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + ret = SXE_XDP_CONSUMED; + break; + } + ring = sxe_xdp_tx_ring_pick(adapter); + if (static_branch_unlikely(&sxe_xdp_tx_lock_key)) + spin_lock(&ring->tx_lock); + ret = sxe_xdp_ring_xmit(ring, xdpf); + if (static_branch_unlikely(&sxe_xdp_tx_lock_key)) + spin_unlock(&ring->tx_lock); + break; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + ret = !err ? 
SXE_XDP_REDIR : SXE_XDP_CONSUMED; + break; + default: + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + ret = SXE_XDP_CONSUMED; + break; + } + + rcu_read_unlock(); + return ret; +} + +static struct sk_buff *sxe_zc_skb_construct(struct sxe_ring *rx_ring, +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct sxe_rx_buffer *bi) +#else + struct sxe_rx_buffer *bi, + struct xdp_buff *xdp) +#endif +{ +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct xdp_buff *xdp = bi->xdp; +#endif + + u32 metasize = xdp->data - xdp->data_meta; + u32 datasize = xdp->data_end - xdp->data; + struct sk_buff *skb; + + skb = __napi_alloc_skb(&rx_ring->irq_data->napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) { + LOG_ERROR("[xdp] zc skb alloc failed\n"); + goto l_ret; + } + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); + if (metasize) { + skb_metadata_set(skb, metasize); + } +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + xsk_buff_free(xdp); + bi->xdp = NULL; +#else + sxe_zc_rx_buffer_reuse(rx_ring, bi); +#endif +l_ret: + return skb; +} + +#ifdef XSK_BUFF_DMA_SYNC_API_NEED_1_PARAM +static inline void +xsk_buff_dma_sync_for_cpu_compat(struct xdp_buff *xdp, + void __always_unused *pool) +{ + xsk_buff_dma_sync_for_cpu(xdp); +} + +#define xsk_buff_dma_sync_for_cpu(xdp, pool) \ + xsk_buff_dma_sync_for_cpu_compat(xdp, pool) +#endif + +int sxe_zc_rx_ring_irq_clean(struct sxe_irq_data *irq_data, + struct sxe_ring *rx_ring, + const int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct sxe_adapter *adapter = irq_data->adapter; + u16 cleaned_count = sxe_desc_unused(rx_ring); + unsigned int xdp_res, xdp_xmit = 0; + bool failure = false; + struct sk_buff *skb; + struct sxe_ring_stats stats; + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct xdp_buff xdp; +#ifndef HAVE_NO_XDP_BUFF_RXQ + xdp.rxq = &rx_ring->xdp_rxq; +#endif +#endif + LOG_DEBUG_BDF("entry xdp zc rx irq clean:irq=%u, ring_idx=%u, ring_reg_idx=%u, " + "ring_tc_idx=%u, next_to_clean=%u, next_to_use=%u, budget=%u\n", + irq_data->irq_idx, rx_ring->idx, rx_ring->reg_idx, rx_ring->tc_idx, + rx_ring->next_to_clean, rx_ring->next_to_use, budget); + + while (likely(total_rx_packets < budget)) { + union sxe_rx_data_desc *rx_desc; + struct sxe_rx_buffer *bi; + unsigned int size; + + if (cleaned_count >= SXE_RX_BUFFER_WRITE) { + failure = failure || +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + !sxe_zc_rx_ring_buffers_alloc(rx_ring, + cleaned_count); +#else + !sxe_fast_zc_rx_buffers_alloc(rx_ring, + cleaned_count); +#endif + cleaned_count = 0; + } + + rx_desc = SXE_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) { + break; + } + + LOG_DEBUG_BDF("process xdp zc rx_desc[%u], write back info:" + "status_error=0x%x, length=%u, vlan=%u\n", + rx_ring->next_to_clean, + le16_to_cpu(rx_desc->wb.upper.status_error), + le16_to_cpu(rx_desc->wb.upper.length), + le16_to_cpu(rx_desc->wb.upper.vlan)); + + dma_rmb(); +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; +#else + bi = sxe_zc_rx_buffer_get(rx_ring, size); +#endif + if (unlikely(!sxe_status_err_check(rx_desc, + SXE_RXD_STAT_EOP))) { + struct sxe_rx_buffer *next_bi; +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + xsk_buff_free(bi->xdp); + bi->xdp = NULL; +#else + sxe_zc_rx_buffer_reuse(rx_ring, bi); +#endif + 
sxe_ntc_update(rx_ring); + next_bi = + &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + next_bi->discard = true; +#else + next_bi->skb = ERR_PTR(-EINVAL); +#endif + continue; + } + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + if (unlikely(bi->discard)) { + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + bi->discard = false; +#else + if (unlikely(bi->skb)) { + sxe_zc_rx_buffer_reuse(rx_ring, bi); +#endif + sxe_ntc_update(rx_ring); + continue; + } + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + bi->xdp->data_end = bi->xdp->data + size; + xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool); + xdp_res = sxe_zc_xdp_run(adapter, rx_ring, bi->xdp); +#else + xdp.data = bi->addr; + xdp.data_meta = xdp.data; + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; + xdp.data_end = xdp.data + size; + xdp.handle = bi->handle; + xdp_res = sxe_zc_xdp_run(adapter, rx_ring, &xdp); +#endif + LOG_DEBUG_BDF("ring[%u] xdp res=0x%x\n", rx_ring->idx, xdp_res); + if (xdp_res) { + if (xdp_res & (SXE_XDP_TX | SXE_XDP_REDIR)) { + xdp_xmit |= xdp_res; +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + bi->addr = NULL; + bi->skb = NULL; +#endif + } else { +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + xsk_buff_free(bi->xdp); +#else + sxe_zc_rx_buffer_reuse(rx_ring, bi); +#endif + } + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + bi->xdp = NULL; +#endif + total_rx_packets++; + total_rx_bytes += size; + + cleaned_count++; + sxe_ntc_update(rx_ring); + continue; + } + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + skb = sxe_zc_skb_construct(rx_ring, bi); +#else + skb = sxe_zc_skb_construct(rx_ring, bi, &xdp); +#endif + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + break; + } + + cleaned_count++; + sxe_ntc_update(rx_ring); + + if (eth_skb_pad(skb)) + continue; + + total_rx_bytes += skb->len; + total_rx_packets++; + + sxe_skb_fields_process(rx_ring, rx_desc, skb); + sxe_rx_skb_deliver(irq_data, skb); + } + + if (xdp_xmit & SXE_XDP_REDIR) { + LOG_DEBUG_BDF("ring[%u] do xdp redir\n", rx_ring->idx); + xdp_do_flush_map(); + } + + if (xdp_xmit & SXE_XDP_TX) { + struct sxe_ring *ring = sxe_xdp_tx_ring_pick(adapter); + sxe_xdp_ring_tail_update_locked(ring); + LOG_DEBUG_BDF("ring[%u] do xdp tx and tx ring idx=%u\n", + rx_ring->idx, ring->idx); + } + + stats.packets = total_rx_packets; + stats.bytes = total_rx_bytes; + sxe_rx_pkt_stats_update(rx_ring, &irq_data->rx.irq_rate, &stats); + +#ifdef HAVE_NDO_XSK_WAKEUP + if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { + if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) + xsk_set_rx_need_wakeup(rx_ring->xsk_pool); + else + xsk_clear_rx_need_wakeup(rx_ring->xsk_pool); + + return (int)total_rx_packets; + } +#endif + + return failure ? 
budget : (int)total_rx_packets; +} + +#ifdef HAVE_NDO_XSK_WAKEUP +int sxe_xsk_wakeup(struct net_device *dev, u32 qid, + u32 __maybe_unused flags) +#else +int sxe_xsk_async_xmit(struct net_device *dev, u32 qid) +#endif +{ + struct sxe_adapter *adapter = netdev_priv(dev); + struct sxe_ring *ring; + int ret = 0; + + if (test_bit(SXE_DOWN, &adapter->state)) { + ret = -ENETDOWN; + goto l_ret; + } + + if (!READ_ONCE(adapter->xdp_prog)) { + LOG_ERROR_BDF("xdp prog not setup\n"); + ret = -ENXIO; + goto l_ret; + } + + if (qid >= adapter->xdp_ring_ctxt.num) { + LOG_ERROR_BDF("xdp queue id=%u >= xdp ring num=%u\n", + qid, adapter->xdp_ring_ctxt.num); + ret = -ENXIO; + goto l_ret; + } + + ring = adapter->xdp_ring_ctxt.ring[qid]; + if (test_bit(SXE_TX_DISABLED, &ring->state)) + { + ret = -ENETDOWN; + goto l_ret; + } + + if (!ring->xsk_pool) { + LOG_ERROR_BDF("xdp ring=%u do not have xsk_pool\n", qid); + ret = -ENXIO; + goto l_ret; + } + + if (!napi_if_scheduled_mark_missed(&ring->irq_data->napi)) { + + u64 eics = BIT_ULL(ring->irq_data->irq_idx); + sxe_irq_queues_rearm(adapter, eics); + } + +l_ret: + return ret; +} + +void sxe_xsk_tx_ring_clean(struct sxe_ring *tx_ring) +{ + u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; + struct sxe_tx_buffer *tx_bi; + u32 xsk_frames = 0; + + while (ntc != ntu) { + tx_bi = &tx_ring->tx_buffer_info[ntc]; + + if (tx_bi->xdpf) { + sxe_xdp_tx_buffer_clean(tx_ring, tx_bi); + } else { + xsk_frames++; + } + + tx_bi->xdpf = NULL; + + ntc++; + if (ntc == tx_ring->depth) { + ntc = 0; + } + } + + if (xsk_frames) { + xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); + } + + return; +} + +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +void sxe_xsk_rx_ring_clean(struct sxe_ring *rx_ring) +{ + struct sxe_rx_buffer *bi; + u16 i; + + for (i = 0; i < rx_ring->depth; i++) { + bi = &rx_ring->rx_buffer_info[i]; + + if (!bi->xdp) + continue; + + xsk_buff_free(bi->xdp); + bi->xdp = NULL; + } +} +#else +void sxe_xsk_rx_ring_clean(struct sxe_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + struct sxe_rx_buffer *bi = &rx_ring->rx_buffer_info[i]; + + while (i != rx_ring->next_to_alloc) { + xsk_umem_fq_reuse(rx_ring->xsk_pool, bi->handle); + i++; + bi++; + if (i == rx_ring->depth) { + i = 0; + bi = rx_ring->rx_buffer_info; + } + } + + return; +} +#endif + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL +static s32 sxe_xsk_umem_dma_map(struct sxe_adapter *adapter, + struct xdp_umem *umem) +{ + struct device *dev = &adapter->pdev->dev; + u32 i, j; + dma_addr_t dma; + s32 ret = 0; + + for (i = 0; i < umem->npgs; i++) { + dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE, + DMA_BIDIRECTIONAL, SXE_RX_DMA_ATTR); + if (dma_mapping_error(dev, dma)) { + goto out_unmap; + } + + umem->pages[i].dma = dma; + } + + goto l_ret; + +out_unmap: + for (j = 0; j < i; j++) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, SXE_RX_DMA_ATTR); + umem->pages[i].dma = 0; + } + ret = -SXE_ERR_CONFIG; + +l_ret: + return ret; +} + +static void sxe_xsk_umem_dma_unmap(struct sxe_adapter *adapter, + struct xdp_umem *umem) +{ + struct device *dev = &adapter->pdev->dev; + u32 i; + + for (i = 0; i < umem->npgs; i++) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, SXE_RX_DMA_ATTR); + + umem->pages[i].dma = 0; + } + + return; +} +#endif + +#ifdef HAVE_NETDEV_BPF_XSK_BUFF_POOL +static s32 sxe_xsk_pool_enable(struct sxe_adapter *adapter, + struct xsk_buff_pool *pool, + u16 qid) + +#else +static s32 sxe_xsk_pool_enable(struct sxe_adapter *adapter, + struct xdp_umem 
*pool, + u16 qid) +#endif +{ + struct net_device *netdev = adapter->netdev; +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + struct xdp_umem_fq_reuse *reuseq; +#endif + bool if_running; + s32 ret; + + LOG_DEBUG_BDF("xdp xsk umem enable qid=%u\n", qid); + if (qid >= adapter->rx_ring_ctxt.num) { + LOG_ERROR_BDF("xdp queue id[%u] >= rx ring num[%u]\n", + qid, adapter->rx_ring_ctxt.num); + ret = -EINVAL; + goto l_ret; + } + + if (qid >= netdev->real_num_rx_queues || + qid >= netdev->real_num_tx_queues) { + LOG_ERROR_BDF("xdp queue id[%u] >= real rx ring num[%u]" + "or real tx ring num[%u]\n",qid, + netdev->real_num_rx_queues, netdev->real_num_tx_queues); + ret = -EINVAL; + goto l_ret; + } +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + ret = xsk_pool_dma_map(pool, &adapter->pdev->dev, SXE_RX_DMA_ATTR); +#else + reuseq = xsk_reuseq_prepare(adapter->rx_ring_ctxt.ring[0]->depth); + if (!reuseq) { + LOG_ERROR_BDF("xdp xsk umem fill queue reuse in queue 0 is NULL\n"); + ret = -ENOMEM; + goto l_ret; + } + xsk_reuseq_free(xsk_reuseq_swap(pool, reuseq)); + ret = sxe_xsk_umem_dma_map(adapter, pool); +#endif + if (ret) { + LOG_ERROR_BDF("xdp xsk umem[%p] dma map error,ret=%d\n", pool, ret); + goto l_ret; + } + + if_running = netif_running(adapter->netdev) && + sxe_xdp_adapter_enabled(adapter); + + if (if_running) { + sxe_txrx_ring_disable(adapter, qid); + } + + set_bit(qid, adapter->af_xdp_zc_qps); + LOG_DEBUG_BDF("xdp queue id[%u] set af xdp zc qps=0x%lx\n", + qid, *adapter->af_xdp_zc_qps); + + if (if_running) { + sxe_txrx_ring_enable(adapter, qid); +#ifdef HAVE_NDO_XSK_WAKEUP + ret = sxe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); +#else + ret = sxe_xsk_async_xmit(adapter->netdev, qid); +#endif + if (ret) { + clear_bit(qid, adapter->af_xdp_zc_qps); +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + xsk_pool_dma_unmap(pool, SXE_RX_DMA_ATTR); +#else + sxe_xsk_umem_dma_unmap(adapter, pool); +#endif + LOG_ERROR_BDF("async xmit in queue id[%u] error,ret=%d\n", + qid, ret); + } + } + +l_ret: + return ret; +} + +static s32 sxe_xsk_pool_disable(struct sxe_adapter *adapter, u16 qid) +{ +#ifdef HAVE_NETDEV_BPF_XSK_BUFF_POOL + struct xsk_buff_pool *pool; +#else + struct xdp_umem *pool; +#endif + bool if_running; + s32 ret = 0; + + LOG_DEBUG_BDF("xdp xsk umem disable qid=%u\n", qid); + +#ifdef HAVE_NETDEV_BPF_XSK_BUFF_POOL + pool = xsk_get_pool_from_qid(adapter->netdev, qid); +#else + pool = xdp_get_umem_from_qid(adapter->netdev, qid); +#endif + if (!pool) { + LOG_ERROR_BDF("xdp xsk get umem error,qid=%u\n", qid); + ret = -EINVAL; + goto l_ret; + } + + if_running = netif_running(adapter->netdev) && + sxe_xdp_adapter_enabled(adapter); + + if (if_running) { + sxe_txrx_ring_disable(adapter, qid); + } + + clear_bit(qid, adapter->af_xdp_zc_qps); +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL + xsk_pool_dma_unmap(pool, SXE_RX_DMA_ATTR); +#else + sxe_xsk_umem_dma_unmap(adapter, pool); +#endif + if (if_running) { + sxe_txrx_ring_enable(adapter, qid); + } + +l_ret: + return ret; +} + +static s32 sxe_xsk_pool_setup(struct sxe_adapter *adapter, +#ifdef HAVE_NETDEV_BPF_XSK_BUFF_POOL + struct xsk_buff_pool *pool, +#else + struct xdp_umem *pool, +#endif + u16 qid) +{ + return pool ? 
sxe_xsk_pool_enable(adapter, pool, qid) : + sxe_xsk_pool_disable(adapter, qid); +} + +#ifdef HAVE_NETDEV_BPF_XSK_BUFF_POOL +struct xsk_buff_pool *sxe_xsk_pool_get(struct sxe_adapter *adapter, + struct sxe_ring *ring) +{ + struct xsk_buff_pool *pool = NULL; +#else +struct xdp_umem *sxe_xsk_pool_get(struct sxe_adapter *adapter, + struct sxe_ring *ring) +{ + struct xdp_umem *pool = NULL; +#endif + bool xdp_on = !!READ_ONCE(adapter->xdp_prog); + u16 qid = ring->ring_idx; + + if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps)) { + LOG_DEBUG_BDF("xdp state=%s or queue id[%u] not set xdp\n", + xdp_on ? "on" : "off", qid); + goto l_ret; + } +#ifdef HAVE_NETDEV_BPF_XSK_BUFF_POOL + pool = xsk_get_pool_from_qid(adapter->netdev, qid); +#else + pool = xdp_get_umem_from_qid(adapter->netdev, qid); +#endif +l_ret: + return pool; +} +#endif + +int sxe_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct sxe_adapter *adapter = netdev_priv(dev); + s32 ret; + + switch (xdp->command) { + case XDP_SETUP_PROG: + LOG_DEBUG_BDF("xdp setup prog, prog=%p\n",xdp->prog); + ret = sxe_xdp_setup(dev, xdp->prog); + break; +#ifdef HAVE_XDP_QUERY_PROG + case XDP_QUERY_PROG: + LOG_DEBUG_BDF("xdp query prog\n"); + xdp->prog_id = adapter->xdp_prog ? + adapter->xdp_prog->aux->id : 0; + ret = 0; + break; +#endif + +#ifdef HAVE_AF_XDP_ZERO_COPY + case XDP_SETUP_XSK_POOL: +#ifdef HAVE_NETDEV_BPF_XSK_BUFF_POOL + LOG_DEBUG_BDF("xdp setup xsk pool pool=%p, queue_id=%u\n", + xdp->xsk.pool, xdp->xsk.queue_id); + ret = sxe_xsk_pool_setup(adapter, xdp->xsk.pool, + xdp->xsk.queue_id); +#else + LOG_DEBUG_BDF("xdp setup xsk umem umem=%p, queue_id=%u\n", + xdp->xsk.umem, xdp->xsk.queue_id); + ret = sxe_xsk_pool_setup(adapter, xdp->xsk.umem, + xdp->xsk.queue_id); +#endif + break; +#endif + + default: + LOG_DEBUG_BDF("invalid xdp cmd= %u\n",xdp->command); + ret = -EINVAL; + } + + return ret; +} + +#else +struct sk_buff *sxe_xdp_run(struct sxe_adapter *adapter, + struct sxe_ring *rx_ring, + struct xdp_buff *xdp) +{ + return ERR_PTR(-SXE_XDP_PASS); +} + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_xdp.h b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_xdp.h new file mode 100644 index 000000000000..56d1436347fd --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/sxepf/sxe_xdp.h @@ -0,0 +1,108 @@ + +#ifndef __SXE_XDP_H__ +#define __SXE_XDP_H__ + +#include "sxe.h" +#ifdef HAVE_XDP_SUPPORT +#include +#include +#endif +#ifndef HAVE_NO_XDP_BUFF_RXQ +#include +#endif + +#define SXE_XDP_PASS (0) +#define SXE_XDP_CONSUMED BIT(0) +#define SXE_XDP_TX BIT(1) +#define SXE_XDP_REDIR BIT(2) + +#ifdef HAVE_AF_XDP_ZERO_COPY +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include +#else +#include +#endif + +static inline bool sxe_xdp_adapter_enabled(struct sxe_adapter *adapter) +{ + return !!adapter->xdp_prog; +} + +#ifndef HAVE_MEM_TYPE_XSK_BUFF_POOL + +bool sxe_xdp_tx_ring_irq_clean(struct sxe_irq_data *irq_data, + struct sxe_ring *tx_ring, int napi_budget); + +int sxe_zc_rx_ring_irq_clean(struct sxe_irq_data *irq_data, + struct sxe_ring *rx_ring, const int budget); + +int sxe_xsk_async_xmit(struct net_device *dev, u32 qid); + +void sxe_zca_free(struct zero_copy_allocator *alloc, + unsigned long handle_addr); + +void sxe_xsk_rx_ring_clean(struct sxe_ring *rx_ring); + +void sxe_xsk_tx_ring_clean(struct sxe_ring *tx_ring); + +void sxe_zc_rx_ring_buffers_alloc(struct sxe_ring *rx_ring, u16 count); + +#else + +bool sxe_zc_rx_ring_buffers_alloc(struct sxe_ring *rx_ring, u16 count); + +s32 sxe_zc_rx_ring_irq_clean(struct sxe_irq_data *irq_data, 
+ struct sxe_ring *rx_ring, + const int budget); + +void sxe_xsk_rx_ring_clean(struct sxe_ring *rx_ring); + +bool sxe_xdp_tx_ring_irq_clean(struct sxe_irq_data *irq_data, + struct sxe_ring *tx_ring, int napi_budget); + +void sxe_xsk_tx_ring_clean(struct sxe_ring *tx_ring); + +#endif + +#ifdef HAVE_NETDEV_BPF_XSK_BUFF_POOL +struct xsk_buff_pool *sxe_xsk_pool_get(struct sxe_adapter *adapter, + struct sxe_ring *ring); +#else +struct xdp_umem *sxe_xsk_pool_get(struct sxe_adapter *adapter, + struct sxe_ring *ring); +#endif + +#ifdef HAVE_NDO_XSK_WAKEUP +int sxe_xsk_wakeup(struct net_device *dev, u32 qid, + u32 __maybe_unused flags); +#endif + +#endif + +#ifdef HAVE_XDP_SUPPORT +DECLARE_STATIC_KEY_FALSE(sxe_xdp_tx_lock_key); +static inline +struct sxe_ring *sxe_xdp_tx_ring_pick(struct sxe_adapter *adapter) +{ + s32 cpu = smp_processor_id(); + u16 idx = static_key_enabled(&sxe_xdp_tx_lock_key) ? \ + cpu % SXE_XDP_RING_NUM_MAX : cpu; + + return adapter->xdp_ring_ctxt.ring[idx]; +} + +void sxe_xdp_ring_tail_update_locked(struct sxe_ring *ring); + +int sxe_xdp(struct net_device *dev, struct netdev_bpf *xdp); + +int sxe_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags); + +u32 sxe_max_xdp_frame_size(struct sxe_adapter *adapter); +#endif + +struct sk_buff *sxe_xdp_run(struct sxe_adapter *adapter, + struct sxe_ring *rx_ring, + struct xdp_buff *xdp); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxe/vercode_build.sh b/drivers/net/ethernet/linkdata/sxe/vercode_build.sh new file mode 100644 index 000000000000..6022874ecf54 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxe/vercode_build.sh @@ -0,0 +1,91 @@ +#! /bin/bash + +# Kernel tree path passed in as the first argument +kernel_path=$1 + +############################################### +# Compatibility lists: entries in each LIST map one-to-one to entries in the matching MACRO array +# To add another operating system, add a new LIST and a corresponding MACRO array +############################################### +# NFS +NFS_LIST=("4.19.113-14.1.nfs4.x86_64" "4.19.113-40.nfs4.x86_64") +NFS_MACRO=("NFS_4_0_0613" "NFS_4_0_0612") +# UOS +UOS_LIST=("4.19.90-2201.4.0.0135.up1.uel20.x86_64" "4.19.90-2305.1.0.0199.56.uel20.x86_64" \ + "5.10.0-46.uel20.x86_64" "4.19.90-2403.3.0.0270.84.uel20.x86_64" "5.10.0-74.uel20.x86_64") +UOS_MACRO=("UOS_1050" "UOS_1060_4_19" "UOS_1060_5_10" "UOS_1070_4_19" "UOS_1070_5_10") +# kylin linux +KYLIN_LIST=("4.19.90-24.4.v2101.ky10.x86_64" "4.19.90-vhulk2001.1.0.0026.ns7.15.x86_64" \ + "4.19.90-21.2.9.wa.x86_64") +KYLIN_MACRO=("KYLIN_10_SP2" "KYLIN_0429" "KYLIN_0721") +# anolis +ANOLIS_LIST=("5.10.134-13.an8.x86_64") +ANOLIS_MACRO=("ANOLIS_8_8") +# openeuler +EULER_LIST=("5.10.0-60.18.0.50.oe2203.x86_64") +EULER_MACRO=("EULER_2203_LTS") +# bc-linux +BCLINUX_LIST=("4.19.0-240.23.11.el8_2.bclinux.x86_64" "5.10.0-200.el8_2.bclinux.x86_64") +BCLINUX_MACRO=("BCLINUX_8_2_4_19" "BCLINUX_8_2_5_10") +# culinux +CULINUX_LIST=("5.10.0-60.67.0.107.ule3.x86_64") +CULINUX_MACRO=("CULINUX_3_0") + +KERNEL_LIST=(NFS_LIST UOS_LIST KYLIN_LIST ANOLIS_LIST EULER_LIST BCLINUX_LIST CULINUX_LIST) +MACRO_LIST=(NFS_MACRO UOS_MACRO KYLIN_MACRO ANOLIS_MACRO EULER_MACRO BCLINUX_MACRO CULINUX_MACRO) + +############################################### +# Get the index of an element in an array +############################################### +function getArrItemIdx(){ + local arr=$1 + local item=$2 + local index=0 + + for i in ${arr[*]}; do + if [[ $item == $i ]] + then + echo $index + return + fi + index=$(($index + 1)) + done + + echo -1 + return +} + +############################################### +# Get the kernel version the driver is built against +############################################### +function getKernelVersion(){ + local 
uts_h="/include/generated/utsrelease.h" + version_path=$1$uts_h + if [ ! -f $version_path ];then + return + fi + cat $version_path | grep UTS_RELEASE | awk '{ print $3 }' | sed 's/\"//g' + return +} + +############################################## +# main: print the macro that corresponds to the current kernel version +############################################## +function main(){ + local build_kernel=$(getKernelVersion $kernel_path) + local row=0 + for OS_TYPE in ${KERNEL_LIST[*]}; do + kernel_tmp=$OS_TYPE[*] + macro_tmp=${MACRO_LIST[row]}[*] + KERNELS=(${!kernel_tmp}) + MACROS=(${!macro_tmp}) + col=$(getArrItemIdx "${KERNELS[*]}" $build_kernel) + if [ $col != -1 ]; then + echo ${MACROS[col]} + return + fi + row=$(($row + 1)) + done +} + +main \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/Kconfig b/drivers/net/ethernet/linkdata/sxevf/Kconfig new file mode 100644 index 000000000000..d20c5c32bfcf --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# sxevf network device configuration +# + +config SXE_VF + tristate "sxevf PCI Express adapters support" + depends on PCI + select MDIO + select PHYLIB + select PTP_1588_CLOCK + help + This driver supports the sxevf PCI Express family of adapters. + + To compile this driver as a module, choose M here. The module + will be called sxevf. diff --git a/drivers/net/ethernet/linkdata/sxevf/Makefile b/drivers/net/ethernet/linkdata/sxevf/Makefile new file mode 100644 index 000000000000..e0fba6d2085e --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/Makefile @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: GPL-2.0 +CONFIG_MODULE_SIG=n + +# Path of this Makefile +MAKEPATH := $(abspath $(lastword $(MAKEFILE_LIST))) +# Current directory +CURDIR :=$(shell dirname $(MAKEPATH)) + +ifneq ($(KERNELRELEASE),) +# Build the module +CONFIG_SXE_VF ?= m +obj-$(CONFIG_SXE_VF) += sxevf.o +sxevf-objs += $(patsubst %.c, sxevf/%.o, $(notdir $(wildcard $(CURDIR)/sxevf/*.c))) +sxevf-objs += $(patsubst %.c, base/trace/%.o, $(notdir $(wildcard $(CURDIR)/base/trace/*.c))) +sxevf-objs += $(patsubst %.c, base/log/%.o, $(notdir $(wildcard $(CURDIR)/base/log/*.c))) + +# Extra compiler flags and macros +ccflags-y += -Werror -Wmaybe-uninitialized -frecord-gcc-switches +ccflags-y += -I$(CURDIR)/sxevf +ccflags-y += -I$(CURDIR)/include/sxe +ccflags-y += -I$(CURDIR)/include +ccflags-y += -I$(CURDIR)/base/compat +ccflags-y += -I$(CURDIR)/base/trace +ccflags-y += -I$(CURDIR)/base/log +ccflags-y += -DSXE_HOST_DRIVER +ccflags-y += -DSXE_DRIVER_RELEASE +ccflags-$(CONFIG_DCB) += -DSXE_DCB_CONFIGURE +ifneq ($(CONFIG_DCA), ) + ccflags-y += -DSXE_TPH_CONFIGURE +endif + +# Generate the linux kernel version code +ifneq ($(wildcard $(CURDIR)/vercode_build.sh),) + KER_DIR=$(srctree) + SPECIFIC_LINUX=$(shell bash $(CURDIR)/vercode_build.sh $(KER_DIR)) + ifneq ($(SPECIFIC_LINUX),) + ccflags-y += -DSPECIFIC_LINUX + ccflags-y += -D$(SPECIFIC_LINUX) + endif +endif + +else # KERNELRELEASE +# Kernel tree path +KDIR := /lib/modules/$(shell uname -r)/build +all: + @$(MAKE) -C $(KDIR) M=$(CURDIR) modules + +clean: + @rm -rf *.o *.d *.ko Module.* modules.* *.mod* .*.d .*.cmd .tmp_versions *readme.txt + @rm -rf ./sxevf/*.o ./sxevf/.*.cmd + @rm -rf ./base/log/*.o ./base/trace/*.o + +endif # KERNELRELEASE diff --git a/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat.h b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat.h new file mode 100644 index 000000000000..1698929c4fab --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat.h @@ -0,0 +1,28 @@ +#ifndef __SXE_COMPAT_H__ +#define __SXE_COMPAT_H__ 
+ +#include "sxe_compat_gcc.h" + +#include "sxe_compat_inc.h" + +#include "sxe_compat_vercode.h" + +#ifdef SPECIFIC_LINUX +#include "sxe_compat_spec.h" +#elif RHEL_RELEASE_CODE +#include "sxe_compat_rhel.h" +#elif UBUNTU_VERSION_CODE +#include "sxe_compat_ubuntu.h" +#elif OPENEULER_VERSION_CODE +#include "sxe_compat_euler.h" +#elif KYLIN_RELEASE_CODE +#include "sxe_compat_kylin.h" +#elif SUSE_PRODUCT_CODE +#include "sxe_compat_suse.h" +#endif + +#ifndef SXE_KERNEL_MATCHED +#include "sxe_compat_std.h" +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_euler.h b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_euler.h new file mode 100644 index 000000000000..7a810ced458b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_euler.h @@ -0,0 +1,54 @@ +#ifndef __SXE_COMPAT_EULER_H__ +#define __SXE_COMPAT_EULER_H__ + +#if !OPENEULER_VERSION_CODE +#error "OPENEULER_VERSION_CODE is 0 or undefined" +#endif + +#if defined OPENEULER_VERSION_CODE && (OPENEULER_VERSION_CODE == OPENEULER_VERSION(2203,1)) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if defined OPENEULER_VERSION_CODE && (OPENEULER_VERSION_CODE == OPENEULER_VERSION(2203,2)) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_gcc.h b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_gcc.h new file mode 100644 index 000000000000..56425964356a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_gcc.h @@ -0,0 +1,14 @@ +#ifndef __SXE_COMPAT_GCC_H__ +#define __SXE_COMPAT_GCC_H__ + +#ifdef __has_attribute +#if __has_attribute(__fallthrough__) +# define fallthrough __attribute__((__fallthrough__)) +#else +# define fallthrough do {} while (0) +#endif +#else +# define fallthrough do {} while (0) +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_inc.h b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_inc.h new file mode 100644 index 000000000000..0acf3dc2a336 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_inc.h @@ -0,0 +1,6 @@ 
+#ifndef __SXE_COMPAT_INC_H__ +#define __SXE_COMPAT_INC_H__ + +#include + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_kylin.h b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_kylin.h new file mode 100644 index 000000000000..bb245e86966f --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_kylin.h @@ -0,0 +1,77 @@ +#ifndef __SXE_COMPAT_KYLIN_H__ +#define __SXE_COMPAT_KYLIN_H__ + +#if !KYLIN_RELEASE_CODE +#error "KYLIN_RELEASE_CODE is 0 or undefined" +#endif + +#if defined KYLIN_RELEASE_CODE && (KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10,1)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define HAVE_NDO_SET_VF_LINK_STATE +#define SXE_KERNEL_MATCHED +#endif + +#if defined KYLIN_RELEASE_CODE && (KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10,3)) +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define HAVE_AF_XDP_ZERO_COPY +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC + +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#if defined KYLIN_RELEASE_CODE && (KYLIN_RELEASE_CODE == KYLIN_RELEASE_VERSION(10,4)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define XSK_BUFF_DMA_SYNC_API_NEED_1_PARAM +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_overflow.h b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_overflow.h new file mode 100644 index 000000000000..39ed8f073a34 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_overflow.h @@ -0,0 +1,178 @@ + +#ifndef __LINUX_OVERFLOW_H +#define __LINUX_OVERFLOW_H + +#include + +#define 
_kc_is_signed_type(type) (((type)(-1)) < (type)1) +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - _kc_is_signed_type(type))) +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) +#define type_min(T) ((T)((T)-type_max(T)-(T)1)) + +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW +#define check_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_add_overflow(__a, __b, __d); \ +}) + +#define check_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_sub_overflow(__a, __b, __d); \ +}) + +#define check_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + __builtin_mul_overflow(__a, __b, __d); \ +}) + +#else + +#define __unsigned_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a + __b; \ + *__d < __a; \ +}) +#define __unsigned_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a - __b; \ + __a < __b; \ +}) +#define __unsigned_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = __a * __b; \ + __builtin_constant_p(__b) ? \ + __b > 0 && __a > type_max(typeof(__a)) / __b : \ + __a > 0 && __b > type_max(typeof(__b)) / __a; \ +}) + + +#define __signed_add_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a + (u64)__b; \ + (((~(__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + +#define __signed_sub_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a - (u64)__b; \ + ((((__a ^ __b)) & (*__d ^ __a)) \ + & type_min(typeof(__a))) != 0; \ +}) + + +#define __signed_mul_overflow(a, b, d) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + typeof(d) __d = (d); \ + typeof(a) __tmax = type_max(typeof(a)); \ + typeof(a) __tmin = type_min(typeof(a)); \ + (void) (&__a == &__b); \ + (void) (&__a == __d); \ + *__d = (u64)__a * (u64)__b; \ + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ + (__b == (typeof(__b))-1 && __a == __tmin); \ +}) + +#define check_add_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_add_overflow(a, b, d), \ + __unsigned_add_overflow(a, b, d)) + +#define check_sub_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_sub_overflow(a, b, d), \ + __unsigned_sub_overflow(a, b, d)) + +#define check_mul_overflow(a, b, d) \ + __builtin_choose_expr(_kc_is_signed_type(typeof(a)), \ + __signed_mul_overflow(a, b, d), \ + __unsigned_mul_overflow(a, b, d)) + +#endif + +#define check_shl_overflow(a, s, d) ({ \ + typeof(a) _a = a; \ + typeof(s) _s = s; \ + typeof(d) _d = d; \ + u64 _a_full = _a; \ + unsigned int _to_shift = \ + _s >= 0 && _s < 8 * sizeof(*d) ? 
_s : 0; \ + *_d = (_a_full << _to_shift); \ + (_to_shift != _s || *_d < 0 || _a < 0 || \ + (*_d >> _to_shift) != _a); \ +}) + +static inline __must_check size_t array_size(size_t a, size_t b) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(a, b, &bytes)) + return SIZE_MAX; + if (check_mul_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +static inline __must_check size_t __ab_c_size(size_t n, size_t size, size_t c) +{ + size_t bytes; + + if (check_mul_overflow(n, size, &bytes)) + return SIZE_MAX; + if (check_add_overflow(bytes, c, &bytes)) + return SIZE_MAX; + + return bytes; +} + +#define struct_size(p, member, n) \ + __ab_c_size(n, \ + sizeof(*(p)->member) + __must_be_array((p)->member),\ + sizeof(*(p))) + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_rhel.h b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_rhel.h new file mode 100644 index 000000000000..e4ace7925bca --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_rhel.h @@ -0,0 +1,377 @@ +#ifndef __SXE_COMPAT_RHEL_H__ +#define __SXE_COMPAT_RHEL_H__ + +#if !RHEL_RELEASE_CODE +#error "RHEL_RELEASE_CODE is 0 or undefined" +#endif + +#ifndef RHEL_RELEASE_VERSION +#error "RHEL_RELEASE_VERSION is undefined" +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7,6)) +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif +#define HAVE_RHEL7_GSO_FEATURE +#define DCBNL_OPS_GETAPP_RETURN_U8 +#define HAVE_DMA_ATTRS_STRUCT +#define HAVE_NET_DEVICE_EXTENDED +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define NEED_BOOTTIME_SECONDS +#define HAVE_NDO_SET_VF_LINK_STATE + +#define netdev_xmit_more() (skb->xmit_more) +#define NOT_INCLUDE_SCTP_H +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7,7)) +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_RHEL7_GSO_FEATURE +#define DCBNL_OPS_GETAPP_RETURN_U8 +#define HAVE_DMA_ATTRS_STRUCT +#define HAVE_NET_DEVICE_EXTENDED +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define NEED_BOOTTIME_SECONDS +#define HAVE_NDO_SET_VF_LINK_STATE + +#define netdev_xmit_more() (skb->xmit_more) +#define NOT_INCLUDE_SCTP_H +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7,8)) +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_RHEL7_GSO_FEATURE +#define DCBNL_OPS_GETAPP_RETURN_U8 +#define HAVE_DMA_ATTRS_STRUCT +#define HAVE_NET_DEVICE_EXTENDED +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define NEED_BOOTTIME_SECONDS +#define HAVE_NDO_SET_VF_LINK_STATE + +#define netdev_xmit_more() (skb->xmit_more) +#define 
NOT_INCLUDE_SCTP_H +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7,9)) +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_RHEL7_GSO_FEATURE +#define DCBNL_OPS_GETAPP_RETURN_U8 +#define HAVE_DMA_ATTRS_STRUCT +#define HAVE_NET_DEVICE_EXTENDED +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define NEED_BOOTTIME_SECONDS +#define HAVE_NDO_SET_VF_LINK_STATE + +#define netdev_xmit_more() (skb->xmit_more) + +#define NOT_INCLUDE_SCTP_H +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define HAVE_NDO_SET_VF_LINK_STATE +#define NDO_SET_FEATURES_RTN_0 +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define netdev_xmit_more() (skb->xmit_more) +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,1)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_AF_XDP_ZERO_COPY +#define XSK_UMEM_CONSUME_TX_NEED_3_PARAMS +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define netdev_xmit_more() (skb->xmit_more) +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,2)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,3)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define xsk_umem_discard_addr xsk_umem_release_addr +#define xsk_umem_discard_addr_rq xsk_umem_release_addr_rq +#define 
xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,4)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define XSK_BUFF_DMA_SYNC_API_NEED_1_PARAM +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,5)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,6)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,7)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP 
+#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define BPF_WARN_INVALID_XDP_ACTION_API_NEED_3_PARAMS +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(9,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(9,1)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_NDO_SET_VF_LINK_STATE +#define BPF_WARN_INVALID_XDP_ACTION_API_NEED_3_PARAMS +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(9,2)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_NDO_SET_VF_LINK_STATE +#define BPF_WARN_INVALID_XDP_ACTION_API_NEED_3_PARAMS +#define NETIF_NAPI_ADD_API_NEED_3_PARAMS +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_spec.h b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_spec.h new file mode 100644 index 000000000000..a91964415e65 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_spec.h @@ -0,0 +1,310 @@ +#ifndef __SXE_COMPAT_SPEC_H__ +#define __SXE_COMPAT_SPEC_H__ + +#ifndef SPECIFIC_LINUX +#error "SPECIFIC_LINUX is undefined" +#endif + +#ifdef NFS_4_0_0613 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define 
NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define netdev_xmit_more() (skb->xmit_more) +#define SXE_KERNEL_MATCHED +#endif + +#ifdef NFS_4_0_0612 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define netdev_xmit_more() (skb->xmit_more) +#define SXE_KERNEL_MATCHED +#endif + +#ifdef UOS_1050 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define NETDEV_XMIT_MORE_WORK_AROUND +#define SXE_KERNEL_MATCHED +#endif + +#ifdef UOS_1060_4_19 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define NETDEV_XMIT_MORE_WORK_AROUND +#define SXE_KERNEL_MATCHED +#endif + +#ifdef UOS_1060_5_10 +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#ifdef UOS_1070_4_19 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define SXE_KERNEL_MATCHED +#endif + +#ifdef UOS_1070_5_10 +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_SKB_CSUM_SCTP_API +#define 
HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#ifdef CULINUX_3_0 +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#ifdef KYLIN_10_SP2 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define NETDEV_XMIT_MORE_WORK_AROUND +#define SXE_KERNEL_MATCHED +#endif + +#ifdef KYLIN_0429 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define NETDEV_XMIT_MORE_WORK_AROUND +#define SXE_KERNEL_MATCHED +#endif + +#ifdef KYLIN_0721 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_NDO_SET_VF_LINK_STATE +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#define NEED_SKB_FRAG_OFF_API +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_SIZE_API +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define HAVE_NETDEV_NESTED_PRIV +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define ETH_P_LLDP 0x88CC +#define NETDEV_XMIT_MORE_WORK_AROUND +#define SXE_KERNEL_MATCHED +#endif + +#ifdef ANOLIS_8_8 +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#ifdef EULER_2203_LTS +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT 
+#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#ifdef BCLINUX_8_2_4_19 +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define xsk_umem_discard_addr xsk_umem_release_addr +#define xsk_umem_discard_addr_rq xsk_umem_release_addr_rq +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define xsk_tx_peek_desc xsk_umem_consume_tx +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define SXE_KERNEL_MATCHED +#endif + +#ifdef BCLINUX_8_2_5_10 +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_std.h b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_std.h new file mode 100644 index 000000000000..cf8727f755d5 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_std.h @@ -0,0 +1,550 @@ +#ifndef __SXE_COMPAT_STD_H__ +#define __SXE_COMPAT_STD_H__ + +#ifndef LINUX_VERSION_CODE +#error "LINUX_VERSION_CODE is undefined" +#endif + +#ifndef KERNEL_VERSION +#error "KERNEL_VERSION is undefined" +#endif + +#ifdef SXE_KERNEL_MATCHED +#error "SXE_KERNEL_MATCHED is defined" +#endif + + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#else +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,6))) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(9,0)) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif +#endif +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(9,1))) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif + +#if (OPENEULER_VERSION_CODE && (OPENEULER_VERSION_CODE >= OPENEULER_VERSION(2203,1))) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif + +#if (SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE > SUSE_PRODUCT(1,15,2,0))) +#define HAVE_ETHTOOL_COALESCE_EXTACK +#endif +#endif + +#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(4,16,0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) +#undef XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#endif +#else +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,6)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,5))) +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)) +#define HAVE_XDP_BUFF_DATA_META +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#define HAVE_XDP_BUFF_FRAME_SIZE +#endif +#if (KYLIN_RELEASE_CODE && (KYLIN_RELEASE_CODE >= KYLIN_RELEASE_VERSION(10,3))) +#define HAVE_XDP_BUFF_FRAME_SIZE +#endif +#else +#define HAVE_XDP_BUFF_FRAME_SIZE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,12,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) +#define HAVE_XDP_BUFF_INIT_API +#endif +#else +#define HAVE_XDP_BUFF_INIT_API +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,12,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) +#define HAVE_XDP_PREPARE_BUFF_API +#endif +#else +#define HAVE_XDP_PREPARE_BUFF_API +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) +#define HAVE_NETDEV_NESTED_PRIV +#endif +#else +#define HAVE_NETDEV_NESTED_PRIV +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_TIMEOUT_TXQUEUE_IDX +#endif +#else +#define HAVE_TIMEOUT_TXQUEUE_IDX +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#define HAVE_NET_PREFETCH_API +#endif +#else +#define HAVE_NET_PREFETCH_API +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_FDB_ADD_EXTACK +#endif +#else +#define HAVE_NDO_FDB_ADD_EXTACK +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif +#else +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) +#define HAVE_NDO_SET_VF_LINK_STATE +#endif +#else +#define HAVE_NDO_SET_VF_LINK_STATE +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#define ETH_GET_HEADLEN_API_NEED_2_PARAM +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))) +#undef ETH_GET_HEADLEN_API_NEED_2_PARAM +#endif + +#if (KYLIN_RELEASE_CODE && (KYLIN_RELEASE_CODE >= KYLIN_RELEASE_VERSION(10,3))) +#undef ETH_GET_HEADLEN_API_NEED_2_PARAM +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +#define NEED_SKB_FRAG_OFF_ADD_API +#define NEED_SKB_FRAG_OFF_API +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,14,241) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +#undef NEED_SKB_FRAG_OFF_API +#endif +#if (LINUX_VERSION_CODE > KERNEL_VERSION(4,19,200) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#undef NEED_SKB_FRAG_OFF_API +#endif + +#if 
(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2)) +#undef NEED_SKB_FRAG_OFF_API +#undef NEED_SKB_FRAG_OFF_ADD_API +#endif + +#if (UBUNTU_VERSION_CODE && (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,15,0,159) && \ + UBUNTU_VERSION_CODE < UBUNTU_VERSION(4,15,0,999))) +#undef NEED_SKB_FRAG_OFF_API +#endif + +#if (SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE >= SUSE_PRODUCT(1,15,2,0))) +#undef NEED_SKB_FRAG_OFF_API +#undef NEED_SKB_FRAG_OFF_ADD_API +#endif + +#if (KYLIN_RELEASE_CODE && (KYLIN_RELEASE_CODE >= KYLIN_RELEASE_VERSION(10,3))) +#undef NEED_SKB_FRAG_OFF_API +#undef NEED_SKB_FRAG_OFF_ADD_API +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#ifndef ETH_P_LLDP +#define ETH_P_LLDP 0x88CC +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(9,0)) +#define HAVE_NDO_ETH_IOCTL +#endif +#else +#define HAVE_NDO_ETH_IOCTL +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,1,0)) +#define NETIF_NAPI_ADD_API_NEED_3_PARAMS +#else +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(9,2)) +#define NETIF_NAPI_ADD_API_NEED_3_PARAMS +#endif +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) +#define HAVE_SKB_XMIT_MORE +#endif +#else +#define HAVE_SKB_XMIT_MORE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)) +#ifdef HAVE_SKB_XMIT_MORE +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,2))) +#define netdev_xmit_more() (skb->xmit_more) +#endif +#else +#define netdev_xmit_more() (0) +#endif +#endif + +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)) +#define DCBNL_OPS_GETAPP_RETURN_U8 +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_DMA_ATTRS_STRUCT +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#define HAVE_NET_DEVICE_EXTENDED +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) +#ifndef skb_frag_size +#define NEED_SKB_FRAG_SIZE_API +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define NEED_BOOTTIME_SECONDS +#endif + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) +#define NOT_INCLUDE_SCTP_H +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)) +#define HAVE_XDP_SUPPORT +#endif + +#ifdef HAVE_XDP_SUPPORT +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0)) +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) +#define HAVE_XDP_QUERY_PROG +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#undef HAVE_XDP_QUERY_PROG +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,17,0)) +#define BPF_WARN_INVALID_XDP_ACTION_API_NEED_3_PARAMS +#else +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,7))) +#if (RHEL_RELEASE_CODE != RHEL_RELEASE_VERSION(9,0)) +#define BPF_WARN_INVALID_XDP_ACTION_API_NEED_3_PARAMS +#endif +#endif +#endif + +#endif + +#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(5,17,0)) +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#else +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,7))) +#if RHEL_RELEASE_CODE != RHEL_RELEASE_VERSION(9,0) +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif +#endif + +#if (OPENEULER_VERSION_CODE && (OPENEULER_VERSION_CODE >= OPENEULER_VERSION(2203,1))) +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,1))) +#define HAVE_AF_XDP_ZERO_COPY +#endif +#else +#define HAVE_AF_XDP_ZERO_COPY +#endif + +#ifdef HAVE_AF_XDP_ZERO_COPY +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,4))) +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif +#else +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5))) +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#endif +#else +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)) +#if (SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE >= SUSE_PRODUCT(1,15,2,0))) +#define HAVE_NDO_XSK_WAKEUP +#endif +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,3))) +#define HAVE_NDO_XSK_WAKEUP +#endif +#else +#define HAVE_NDO_XSK_WAKEUP +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#define XSK_BUFF_DMA_SYNC_API_NEED_1_PARAM +#endif +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5)) +#undef XSK_BUFF_DMA_SYNC_API_NEED_1_PARAM +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) +#define NEED_XSK_BUFF_POOL_RENAME +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8,5)) +#undef NEED_XSK_BUFF_POOL_RENAME +#endif +#endif + +#ifdef NEED_XSK_BUFF_POOL_RENAME +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define xsk_get_pool_from_qid xdp_get_umem_from_qid +#define xsk_pool_get_rx_frame_size xsk_umem_get_rx_frame_size +#define xsk_pool_set_rxq_info xsk_buff_set_rxq_info +#define xsk_pool_dma_unmap xsk_buff_dma_unmap +#define xsk_pool_dma_map xsk_buff_dma_map +#define xsk_tx_peek_desc xsk_umem_consume_tx +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,3,0)) +#define XSK_UMEM_CONSUME_TX_NEED_3_PARAMS +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(8,1)) +#undef XSK_UMEM_CONSUME_TX_NEED_3_PARAMS +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) +#define xsk_umem_discard_addr xsk_umem_release_addr +#define xsk_umem_discard_addr_rq xsk_umem_release_addr_rq +#else +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(8,3)) +#define xsk_umem_discard_addr xsk_umem_release_addr +#define xsk_umem_discard_addr_rq xsk_umem_release_addr_rq +#endif +#endif +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0)) +#define u64_stats_fetch_begin_irq u64_stats_fetch_begin +#define u64_stats_fetch_retry_irq u64_stats_fetch_retry +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,4,0)) +#define CLASS_CREATE_NEED_1_PARAM +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,4,0)) +#define DEFINE_SEMAPHORE_NEED_CNT +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,6,0)) +#define DELETE_PCIE_ERROR_REPORTING +#endif + +#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)) +#define HAVE_ETH_HW_ADDR_SET_API +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) +#define HAVE_PTP_CLOCK_INFO_ADJFINE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)) +#define HAVE_NO_XDP_BUFF_RXQ +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define HAVE_NO_OVERFLOW_H +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)))) +#ifndef dma_map_page_attrs +#define dma_map_page_attrs __kc_dma_map_page_attrs +static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, + struct page *page, + size_t offset, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + return dma_map_page(dev, page, offset, size, dir); +} +#endif +#ifndef dma_unmap_page_attrs +#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs +static inline void __kc_dma_unmap_page_attrs(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir, + unsigned long __always_unused attrs) +{ + dma_unmap_page(dev, addr, size, dir); +} +#endif +static inline void __page_frag_cache_drain(struct page *page, + unsigned int count) +{ + if (!page_ref_sub_and_test(page, count)) + return; + + init_page_count(page); + + __free_pages(page, compound_order(page)); +} +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#define HAVE_NO_SWIOTLB_SKIP_CPU_SYNC +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5)) +#undef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +struct _kc_xdp_buff { + void *data; + void *data_end; + void *data_hard_start; +}; +#define xdp_buff _kc_xdp_buff +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#define NO_VOID_NDO_GET_STATS64 +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#undef NO_VOID_NDO_GET_STATS64 +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#define NO_NETDEVICE_MIN_MAX_MTU +#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,5))) +#undef NO_NETDEVICE_MIN_MAX_MTU +#endif +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#if (!RHEL_RELEASE_CODE) || \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,5))) +static inline bool _kc_napi_complete_done(struct napi_struct *napi, + int __always_unused work_done) +{ + napi_complete(napi); + + return true; +} + +#ifdef napi_complete_done +#undef napi_complete_done +#endif +#define napi_complete_done _kc_napi_complete_done +#endif +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) +#define HAVE_NO_PCIE_FLR +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) +#define HAVE_NO_HWTSTAMP_FILTER_NTP_ALL +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)) +#define TIMER_DATA_TYPE unsigned long +#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE) +#define timer_setup(timer, callback, flags) \ + __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \ + (TIMER_DATA_TYPE)(timer), (flags)) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) +#define NO_NEED_SIGNAL_H +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define HAVE_NO_MACVLAN_DEST_FILTER +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define HAVE_NO_SB_BIND_CHANNEL +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define HAVE_NO_MACVLAN_RELEASE +#endif + +#if 
(LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define NEED_SET_MACVLAN_MODE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)) +#define NO_NEED_POOL_DEFRAG +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) +#define HAVE_NO_WALK_UPPER_DEV +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_suse.h b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_suse.h new file mode 100644 index 000000000000..61c2906f6d41 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_suse.h @@ -0,0 +1,57 @@ +#ifndef __SXE_COMPAT_SUSE_H__ +#define __SXE_COMPAT_SUSE_H__ + +#if !CONFIG_SUSE_KERNEL +#error "CONFIG_SUSE_KERNEL is 0 or undefined" +#endif + +#if !SUSE_PRODUCT_CODE +#error "SUSE_PRODUCT_CODE is 0 or undefined" +#endif + +#if defined SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE == SUSE_PRODUCT(1,15,2,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_XSK_UMEM_ADJUST_OFFSET +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + +#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define SXE_KERNEL_MATCHED +#endif + +#if defined SUSE_PRODUCT_CODE && (SUSE_PRODUCT_CODE == SUSE_PRODUCT(1,15,4,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_ubuntu.h b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_ubuntu.h new file mode 100644 index 000000000000..e933f6cecf62 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_ubuntu.h @@ -0,0 +1,83 @@ +#ifndef __SXE_COMPAT_UBUNTU_H__ +#define __SXE_COMPAT_UBUNTU_H__ + +#if !UTS_UBUNTU_RELEASE_ABI +#error "UTS_UBUNTU_RELEASE_ABI is 0 or undefined" +#endif + +#if !UBUNTU_VERSION_CODE +#error "UBUNTU_VERSION_CODE is 0 or undefined" +#endif + +#ifndef UBUNTU_VERSION +#error "UBUNTU_VERSION is undefined" +#endif + +#if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,4,0,0)) && \ + (UBUNTU_VERSION_CODE < UBUNTU_VERSION(5,5,0,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_QUERY_PROG +#define XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT + 
+#define XDP_SETUP_XSK_POOL XDP_SETUP_XSK_UMEM +#define xdp_convert_buff_to_frame convert_to_xdp_frame +#define xsk_tx_release xsk_umem_consume_tx_done +#define xsk_tx_completed xsk_umem_complete_tx +#define xsk_uses_need_wakeup xsk_umem_uses_need_wakeup +#define SXE_KERNEL_MATCHED +#endif + +#if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,11,0,0)) && \ + (UBUNTU_VERSION_CODE < UBUNTU_VERSION(5,12,0,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define XDP_XMIT_FRAME_FAILED_NEED_FREE +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#if (UBUNTU_VERSION_CODE >= UBUNTU_VERSION(5,15,0,0)) && \ + (UBUNTU_VERSION_CODE < UBUNTU_VERSION(5,16,0,0)) +#define HAVE_XDP_SUPPORT +#define HAVE_XDP_BUFF_DATA_META +#define HAVE_MEM_TYPE_XSK_BUFF_POOL +#define HAVE_NETDEV_BPF_XSK_BUFF_POOL +#define HAVE_AF_XDP_ZERO_COPY +#define HAVE_XDP_BUFF_FRAME_SIZE +#define HAVE_XDP_BUFF_INIT_API +#define HAVE_XDP_PREPARE_BUFF_API +#define HAVE_SKB_CSUM_SCTP_API +#define HAVE_ETHTOOL_COALESCE_EXTACK +#define HAVE_NETDEV_NESTED_PRIV +#define HAVE_TIMEOUT_TXQUEUE_IDX +#define HAVE_NET_PREFETCH_API +#define HAVE_DEV_PAGE_IS_REUSABLE_API +#define HAVE_NDO_XSK_WAKEUP +#define HAVE_NDO_FDB_ADD_EXTACK +#define HAVE_NDO_BRIDGE_SETLINK_EXTACK +#define HAVE_NDO_ETH_IOCTL +#define HAVE_NDO_SET_VF_LINK_STATE +#define HAVE_MACVLAN_OFFLOAD_SUPPORT +#define SXE_KERNEL_MATCHED +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_vercode.h b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_vercode.h new file mode 100644 index 000000000000..2794ce11b799 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/compat/sxe_compat_vercode.h @@ -0,0 +1,61 @@ +#ifndef __SXE_COMPAT_VERCODE_H__ +#define __SXE_COMPAT_VERCODE_H__ + + +#ifndef LINUX_VERSION_CODE +#include +#else +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +#ifndef UTS_RELEASE +#include +#endif + +#ifndef RHEL_RELEASE_CODE +#define RHEL_RELEASE_CODE 0 +#endif +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#define UBUNTU_VERSION(a,b,c,d) (((a) << 24) + ((b) << 16) + (d)) + +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else +#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ + UTS_UBUNTU_RELEASE_ABI) +#if UTS_UBUNTU_RELEASE_ABI > 65535 +#error UTS_UBUNTU_RELEASE_ABI is larger than 65535... 
+#endif +#endif + +#ifndef OPENEULER_VERSION_CODE +#define OPENEULER_VERSION_CODE 0 +#endif +#ifndef OPENEULER_VERSION +#define OPENEULER_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifndef KYLIN_RELEASE_CODE +#define KYLIN_RELEASE_CODE 0 +#endif +#ifndef KYLIN_RELEASE_VERSION +#define KYLIN_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + +#ifdef CONFIG_SUSE_KERNEL +#include +#endif +#ifndef SUSE_PRODUCT_CODE +#define SUSE_PRODUCT_CODE 0 +#endif +#ifndef SUSE_PRODUCT +#define SUSE_PRODUCT(product, version, patchlevel, auxrelease) \ + (((product) << 24) + ((version) << 16) + \ + ((patchlevel) << 8) + (auxrelease)) +#endif + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/base/log/sxe_log.c b/drivers/net/ethernet/linkdata/sxevf/base/log/sxe_log.c new file mode 100644 index 000000000000..1b5f889adbb9 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/log/sxe_log.c @@ -0,0 +1,1128 @@ +#include +#include +#include +#include +#include +#include +#include "sxe_log.h" +#include "sxe_compat.h" + +#if (defined SXE_DRIVER_DEBUG && defined __KERNEL__) || (defined SXE_DRIVER_TRACE) + +int time_for_file_name(char *buff, int buf_len) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) + struct timeval tv; + struct tm td; + + do_gettimeofday(&tv); + time_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest*60, &td); +#else + struct timespec64 tv; + struct tm td; + ktime_get_real_ts64(&tv); + time64_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest*60, &td); +#endif + return snprintf(buff, buf_len, "%04ld-%02d-%02d_%02d:%02d:%02d", + td.tm_year + 1900, td.tm_mon + 1, td.tm_mday, + td.tm_hour, td.tm_min, td.tm_sec); +} + +int sxe_file_write(struct file *file, char *buf, int len) +{ + int ret = 0; + + void *journal; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) + mm_segment_t old_fs; +#endif + + if (!file){ + return 0; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0) + old_fs = get_fs(); + set_fs(get_ds()); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0) + old_fs = get_fs(); + set_fs(KERNEL_DS); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0) +#else + old_fs = force_uaccess_begin(); +#endif + + journal = current->journal_info; + current->journal_info = NULL; + + do{ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(4,10,0) + ret = file->f_op->write(file, buf, len, &file->f_pos); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + ret = vfs_write(file, buf, len, &file->f_pos); +#else + ret = kernel_write(file, buf, len, &file->f_pos); +#endif + }while( ret == -EINTR ); + + if (ret >= 0) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) + fsnotify_modify(file); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) + if ( file->f_path.dentry) { + fsnotify_modify(file->f_path.dentry); + } +#endif + } + + current->journal_info = journal; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0) + set_fs(old_fs); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0) +#else + force_uaccess_end(old_fs); +#endif + + return ret; +} +#endif + +#if defined SXE_DRIVER_DEBUG && defined __KERNEL__ + +#define FILE_NAME_SIZE 128 +#define SXE_KLOG_OUT_WAIT (5 * HZ) +#define SWITCH_FILE +#define LOG_PATH_LEN 100 +#define DRV_LOG_FILE_SIZE_MIN_MB 10 +#define DRV_LOG_FILE_SIZE_MAX_MB 200 + +sxe_debug_t g_sxe_debug; +char g_log_path_str[LOG_PATH_LEN] = {0}; +char g_log_path_bin[LOG_PATH_LEN] = {0}; + +static char g_log_path[80] = {0}; +module_param_string(g_log_path, g_log_path, 80, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(g_log_path, "the path where host driver logs will be saved (<80 chars), Default: /var/log"); + 
+static u32 g_log_file_size = 200; +module_param(g_log_file_size, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(g_log_file_size, + "size of a single driver log file (10MB ~ 200MB), Default: 200, Unit: MB"); + +static u32 g_log_space_size = 0; +module_param(g_log_space_size, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(g_log_space_size, + "total space allowed for host driver logs, Default: 0 (unlimited), Unit: MB"); + +static u32 g_log_tty = 0; +module_param(g_log_tty, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(g_log_tty, + "allow driver logs (ERROR, WARN, INFO) to be output to the tty console, Default: 0 (not allowed)"); + +static inline int time_for_log(char *buff, int buf_len) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) + struct timeval tv; + struct tm td; + + do_gettimeofday(&tv); + time_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest*60, &td); + + return snprintf(buff, buf_len, "[%04ld-%02d-%02d;%02d:%02d:%02d.%ld]", + td.tm_year + 1900, + td.tm_mon + 1, td.tm_mday, td.tm_hour, + td.tm_min, td.tm_sec, tv.tv_usec); +#else + struct timespec64 tv; + struct tm td; + ktime_get_real_ts64(&tv); + time64_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest*60, &td); + return snprintf(buff, buf_len, "[%04ld-%02d-%02d;%02d:%02d:%02d.%ld]", + td.tm_year + 1900, + td.tm_mon + 1, td.tm_mday, td.tm_hour, + td.tm_min, td.tm_sec, tv.tv_nsec / 1000); +#endif +} + +static inline char *sxe_stack_top(void) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + ULong *ptr = (ULong*)(current->thread_info + 1); +#else + ULong *ptr = (ULong*)(task_thread_info(current) + 1 ); +#endif + return (char*)(ptr + 1); +} + +static inline sxe_thread_local_t *sxe_thread_local_get(sxe_thread_key_t * key) +{ + return (sxe_thread_local_t*)(sxe_stack_top() + key->offset); +} + +void sxe_thread_key_create(int size, sxe_thread_key_t *key) +{ + key->offset = g_sxe_debug.key_offset; + g_sxe_debug.key_offset += sizeof(sxe_thread_local_t) + size; +} + +void *sxe_thread_get_specific(sxe_thread_key_t *key) +{ + sxe_thread_local_t *local = sxe_thread_local_get(key); + if (local->magic != DEBUG_TRACE_MAGIC) + { + return NULL; + } + return (void*)local->data; +} + +void sxe_thread_clear_specific(sxe_thread_key_t *key) +{ + sxe_thread_local_t *local = sxe_thread_local_get(key); + local->magic = 0; +} + +int sxe_filter_file_add(char *name) +{ + debug_file_t *file = NULL; + + file = (debug_file_t*)kmalloc(sizeof(debug_file_t), GFP_ATOMIC); + if (!file){ + sxe_print(KERN_ERR, NULL, "kmalloc size %lu failed\n", PAGE_SIZE); + return -ENOMEM; + } + strncpy(file->name, name, sizeof(file->name)); + INIT_LIST_HEAD(&file->list); + + list_add_rcu(&file->list, &g_sxe_debug.filter_file); + return 0; +} + +void sxe_filter_file_del(char *filename) +{ + debug_file_t *file = NULL; + + list_for_each_entry_rcu(file, &g_sxe_debug.filter_file, list){ + if(!strcmp(file->name, filename)){ + list_del_rcu(&file->list); + synchronize_rcu(); + kfree(file); + return; + } + } + return; +} + +void sxe_log_level_modify(u32 level) +{ + sxe_level_set(level); +} + +char* sxe_log_path_query(void) +{ +#ifndef __cplusplus + return g_log_path; +#else + return NULL; +#endif +} + +u32 sxe_log_space_size_query(void) +{ + return g_log_space_size; +} + +u32 sxe_log_file_size_query(void) +{ + return g_log_file_size; +} + +void sxe_log_file_size_modify(u32 size) +{ + g_log_file_size = size; +} + +u32 sxe_log_tty_query(void) +{ + return g_log_tty; +} + +#ifndef SXE_CFG_RELEASE +static inline int sxe_filter_file_print(const char *filename) +{ + debug_file_t *file; + rcu_read_lock(); + 
list_for_each_entry_rcu(file, &g_sxe_debug.filter_file, list){ + if(!strcmp(file->name, filename)){ + rcu_read_unlock(); + return 1; + } + } + rcu_read_unlock(); + return 0; +} + +static inline int sxe_filter_func_print(const char *name) +{ + debug_func_t *func; + + rcu_read_lock(); + list_for_each_entry_rcu(func, &g_sxe_debug.filter_func, list){ + if(!strcmp(func->name, name)){ + rcu_read_unlock(); + return 1; + } + } + rcu_read_unlock(); + return 0; +} + +#endif +void sxe_filter_file_clear(void) +{ + debug_file_t *file = NULL; + + do{ + file = list_first_or_null_rcu( + &g_sxe_debug.filter_file, + debug_file_t, + list); + if (file){ + list_del_rcu(&file->list); + synchronize_rcu(); + kfree(file); + } + }while(file); + + return; +} + +int sxe_filter_func_add(char *name) +{ + debug_func_t *func = NULL; + + func = (debug_func_t *)kmalloc(sizeof(debug_func_t), GFP_ATOMIC); + if (!func){ + sxe_print(KERN_ERR,NULL, "kmalloc size %lu failed\n", PAGE_SIZE); + return -ENOMEM; + } + strncpy(func->name, name, sizeof(func->name)); + INIT_LIST_HEAD(&func->list); + + list_add_rcu(&func->list, &g_sxe_debug.filter_func); + return 0; +} + +void sxe_filter_func_del(char *name) +{ + debug_func_t *func = NULL; + + list_for_each_entry_rcu(func, &g_sxe_debug.filter_func, list){ + if(!strcmp(func->name, name)){ + list_del_rcu(&func->list); + synchronize_rcu(); + kfree(func); + return; + } + } + return; +} + +void sxe_filter_func_clear(void) +{ + debug_func_t *func = NULL; + + do{ + func = list_first_or_null_rcu( + &g_sxe_debug.filter_func, + debug_func_t, + list); + if (func){ + list_del_rcu(&func->list); + synchronize_rcu(); + kfree(func); + } + }while(func); + + return; +} + +static void sxe_file_close(struct file **file) +{ + filp_close(*file, NULL); + *file = NULL; +} + +static int sxe_file_open(sxe_log_t *log, struct file **pp_file) +{ + struct file *file; + int flags_new = O_CREAT | O_RDWR | O_APPEND | O_LARGEFILE; + int flags_rewrite = O_CREAT | O_RDWR | O_LARGEFILE | O_TRUNC; + int err = 0; + int len = 0; + char filename[FILE_NAME_SIZE]; + +#ifdef SWITCH_FILE + memset(filename, 0, FILE_NAME_SIZE); + len += snprintf(filename, FILE_NAME_SIZE, "%s", log->file_path); + if (log->file_num == 0) { + time_for_file_name(filename + len, FILE_NAME_SIZE - len); + } else { + snprintf(filename + len, FILE_NAME_SIZE - len, "%04d", log->index++); + log->index = log->index % log->file_num; + } + + if(log->file_num == 1 && log->file != NULL) { + sxe_file_close(&log->file); + log->file_pos = 0; + } +#else + memset(filename, 0, FILE_NAME_SIZE); + strncpy(filename, path, FILE_NAME_SIZE); +#endif + if (log->file_num == 0) { + file = filp_open(filename, flags_new, 0666); + } else { + file = filp_open(filename, flags_rewrite, 0666); + if (IS_ERR(file)) { + err = (int)PTR_ERR(file); + if (err == -ENOENT) { + file = filp_open(filename, flags_new, 0666); + } + } + } + if (IS_ERR(file)){ + err = (int)PTR_ERR(file); + sxe_print(KERN_ERR, NULL, "open file:%s failed[errno:%d]\n", filename, err); + goto l_out; + } + mapping_set_gfp_mask(file->f_path.dentry->d_inode->i_mapping, GFP_NOFS); + + sxe_print(KERN_INFO, NULL, "redirect file %s\n", filename); + + *pp_file = file; + +l_out: + return err; +} + +static void sxe_file_sync(struct file *file) +{ + struct address_space *mapping; + void *journal; + int ret = 0; + int err; + + (void)ret; + (void)err; + + if( !file || !file->f_op || !file->f_op->fsync ){ + goto l_end; + } + + journal = current->journal_info; + current->journal_info = NULL; + + mapping = file->f_mapping; + + ret = 
filemap_fdatawrite(mapping); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + mutex_lock(&mapping->host->i_mutex); + err = file->f_op->fsync(file, file->f_path.dentry, 1); + if( !ret ){ + ret = err; + } + mutex_unlock(&mapping->host->i_mutex); + err = filemap_fdatawait(mapping); + if( !ret ){ + ret = err; + } + +#else + err = file->f_op->fsync(file, 0, file->f_mapping->host->i_size, 1); +#endif + + current->journal_info = journal; + +l_end: + return; +} + +static void sxe_klog_in(sxe_log_t *log, char *buf, const int len) +{ + int begin = 0; + int end = 0; + int free_size; + ULong flags; + + spin_lock_irqsave(&log->lock, flags); + + if (log->head > log->tail) { + sxe_print(KERN_WARNING, NULL, "FAILURE: log head exceeds log tail\n"); + SXE_BUG_NO_SYNC(); + } + + free_size = log->buf_size - (log->tail - log->head); + + if (free_size <= len){ + log->is_drop = 1; + spin_unlock_irqrestore(&log->lock, flags); + return; + } + + begin = log->tail % log->buf_size; + end = (log->tail + len) % log->buf_size; + + if (begin < end){ + memcpy(log->buf + begin, buf, len); + } + else{ + memcpy(log->buf + begin, buf, log->buf_size - begin); + memcpy(log->buf, buf + log->buf_size - begin, end); + } + + log->tail = log->tail + len; + + spin_unlock_irqrestore(&log->lock, flags); + + return; +} + +static void sxe_klog_out(sxe_log_t *log) +{ + int len = 0; + int rc = 0; + long long tail; + int begin; + int end; + int schedule_count_th = 0; + const int max_loop = 4096; + +#ifdef SWITCH_FILE + struct file *file = NULL; +#endif + + if (log->file == NULL) { + rc = sxe_file_open(log, &log->file); + if (log->file != NULL) { + log->file_pos = 0; + } else { + return; + } + } + + do { + tail = log->tail; + begin = log->head % log->buf_size; + end = tail % log->buf_size; + len = 0; + rc = 0; + + schedule_count_th++; + if ((schedule_count_th >= max_loop)) { + schedule_count_th = 0; + schedule_timeout_interruptible(SXE_KLOG_OUT_WAIT); + } + + if (log->is_drop) { + rc = sxe_file_write( + log->file, + DEBUG_DROP_LOG_STRING, + strlen(DEBUG_DROP_LOG_STRING)); + if (rc < 0) { + break; + } + log->is_drop = 0; + } + + if (begin < end) { + rc = sxe_file_write( + log->file, + log->buf + begin, + end - begin); + if (rc > 0) { + len += rc; + } + } else if(begin > end) { + rc = sxe_file_write( + log->file, + log->buf + begin, + log->buf_size - begin); + if (rc > 0) { + len += rc; + rc = sxe_file_write(log->file, log->buf, end); + if (rc > 0) { + len += rc; + } + } + } + log->head += len; + log->file_pos += len; + + LOG_BUG_ON(log->head > log->tail, "FAILURE: log head exceeds log tail\n"); + }while (log->head != log->tail && rc > 0); + + if (rc < 0) { + sxe_print(KERN_ERR, NULL, "write file %s error %d\n", log->file_path, rc); + return ; + } + +#ifdef SWITCH_FILE + if (log->file_pos >= log->file_size) { + rc = sxe_file_open(log, &file); + if (rc >= 0 && log->file != NULL && log->file_num != 1) { + sxe_file_close(&log->file); + log->file = file; + log->file_pos = 0; + } + } +#endif + return ; +} + +static int sxe_klog_flush(void *arg) +{ + int i; + + while (!kthread_should_stop()){ + schedule_timeout_interruptible(SXE_KLOG_OUT_WAIT); + + for (i = 0; i < ARRAY_SIZE(g_sxe_debug.log); i++){ + sxe_klog_out(&g_sxe_debug.log[i]); + } + } + return 0; +} + +static int sxe_klog_init( + sxe_log_t *log, + long long buf_size, + char *file_path, + long long file_size, + u32 file_num) +{ + int rc = 0; + + memset(log, 0, sizeof(*log)); + spin_lock_init(&log->lock); + + log->buf = (char*)vmalloc(buf_size+PER_CPU_PAGE_SIZE); + if (!log->buf){ + rc = 
-ENOMEM; + goto l_end; + } + + log->file = NULL; + log->head = 0; + log->tail = 0; + log->buf_size = buf_size; + + log->file_path = file_path; + log->file_pos = 0; + log->file_size = file_size; + log->file_num = file_num; + log->index = 0; +l_end: + return rc; +} + +static void sxe_klog_exit(sxe_log_t *log) +{ + if (log->buf) { + vfree(log->buf); + } + if (log->file) { + sxe_file_close(&log->file); + } +} + +static inline char *sxe_file_name_locale(char *file) +{ + char *p_slash = strrchr(file, '/'); + return (p_slash == NULL)?file:(p_slash+1); +} + +void sxe_level_set(int level) +{ + g_sxe_debug.level = level; +} + +s32 sxe_level_get(void) +{ + return (s32)g_sxe_debug.level; +} + +void sxe_bin_status_set(bool status) +{ + g_sxe_debug.status = status; +} + +s32 sxe_bin_status_get(void) +{ + return (s32)g_sxe_debug.status; +} + +void sxe_log_string( + debug_level_e level, + const char *dev_name, + const char *file, + const char *func, + int line, + const char *fmt,...) +{ + sxe_ctxt_t *ctxt = NULL; + char *buf = NULL; + int len = 0; + ULong flags = 0; + const char *name = dev_name ? dev_name : ""; + + va_list args; + + if (level > g_sxe_debug.level){ +#ifndef SXE_CFG_RELEASE + if (!sxe_filter_file_print(file) + && !sxe_filter_func_print(func)){ + return; + } +#else + return; +#endif + } + + if (!in_interrupt()){ + local_irq_save(flags); + } + + ctxt = per_cpu_ptr(g_sxe_debug.ctxt, get_cpu()); + put_cpu(); + + buf = ctxt->buff; + + len = snprintf(buf, PAGE_SIZE, "%s", sxe_debug_level_name(level)); + len += time_for_log(buf+len, PAGE_SIZE - len); + len += snprintf(buf+len, PAGE_SIZE - len, "[%d][%d][%s]%s:%4d:%s:", + raw_smp_processor_id(), current->pid, + name, + sxe_file_name_locale((char*)file), line, func); + + va_start(args, fmt); + len += vsnprintf( + buf + len, + PAGE_SIZE - len, + fmt, + args); + va_end(args); + + + if (!in_interrupt()){ + local_irq_restore(flags); + } + + if (sxe_log_tty_query()) { + if (buf[0] == 'I' || buf[0] == 'W') { + printk_ratelimited(KERN_WARNING"%s", buf + LOG_INFO_PREFIX_LEN); + } else if (buf[0] == 'E') { + printk_ratelimited(KERN_WARNING"%s", buf + LOG_ERROR_PREFIX_LEN); + } + } + sxe_klog_in(&g_sxe_debug.log[DEBUG_TYPE_STRING], buf, len); + + wake_up_process(g_sxe_debug.task); + + return; +} + +void sxe_log_binary( + const char *file, + const char *func, + int line, + u8 *ptr, + u64 addr, + u32 size, + char *str) +{ +#define LINE_TOTAL 16 + sxe_ctxt_t *ctxt = NULL; + char *buf = NULL; + int len = 0; + ULong flags = 0; + u32 i = 0; + u32 j = 0; + u32 max; + u32 mod; + + if (sxe_bin_status_get() != true) { + return; + } + + max = size / LINE_TOTAL; + mod = size % LINE_TOTAL; + + if (!in_interrupt()){ + local_irq_save(flags); + } + + ctxt = per_cpu_ptr(g_sxe_debug.ctxt, get_cpu()); + put_cpu(); + + buf = ctxt->buff; + + len += time_for_log(buf+len, PER_CPU_PAGE_SIZE - len); + len += snprintf(buf+len, PER_CPU_PAGE_SIZE - len, + "[%d] %s %s():%d %s size:%d\n", + current->pid, sxe_file_name_locale((char*)file), func, + line, str, size); + + for (i = 0; i < max; i++) { + j = i * LINE_TOTAL; + + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + + for (; j < (i + 1) * LINE_TOTAL; j++) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + } + len += snprintf(buf + len, PER_CPU_PAGE_SIZE -len, "%c", '\n'); + } + + if (mod) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + + for (; j < size; j++) { + len += snprintf(buf + len, 
PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + } + + len += snprintf(buf + len, PER_CPU_PAGE_SIZE -len, "%c", '\n'); + } + + if (!in_interrupt()) { + local_irq_restore(flags); + } + + sxe_klog_in(&g_sxe_debug.log[DEBUG_TYPE_BINARY], buf, len); + + wake_up_process(g_sxe_debug.task); + + return; +} + +void sxe_log_sync(void) +{ + sxe_file_sync(g_sxe_debug.log[DEBUG_TYPE_STRING].file); + sxe_file_sync(g_sxe_debug.log[DEBUG_TYPE_BINARY].file); +} + +static void sxe_log_file_prefix_add(bool is_vf, char *log_path_p) +{ + if (is_vf) { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s%s.", log_path_p, VF_LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s%s.", log_path_p, VF_BINARY_FILE_PREFIX); + } else { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s%s.", log_path_p, LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s%s.", log_path_p, BINARY_FILE_PREFIX); + } + + return; +} + +static void sxe_log_file_prefix_add_default(bool is_vf, char *log_path_p) +{ + if (is_vf) { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s/%s.", log_path_p, VF_LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s/%s.", log_path_p, VF_BINARY_FILE_PREFIX); + } else { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s/%s.", log_path_p, LOG_FILE_PREFIX); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s/%s.", log_path_p, BINARY_FILE_PREFIX); + } + + return; +} + +static void sxe_log_file_path_set(bool is_vf) +{ + if (is_vf) { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s.", VF_LOG_FILE_PATH); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s.", VF_BINARY_FILE_PATH); + } else { + snprintf(g_log_path_str, LOG_PATH_LEN, "%s.", LOG_FILE_PATH); + snprintf(g_log_path_bin, LOG_PATH_LEN, "%s.", BINARY_FILE_PATH); + } + + return; +} + +int sxe_log_init(bool is_vf) +{ + struct task_struct *task = NULL; + sxe_ctxt_t *ctxt = NULL; + int rc = 0; + int i; + int nid; + u32 file_num = 0; + u32 log_path_len = 0; + u32 input_log_space = sxe_log_space_size_query(); + u32 input_log_file_size = sxe_log_file_size_query(); + unsigned int log_file_size = 0; + char *log_path_p = NULL; + sxe_log_t *log_bin = &g_sxe_debug.log[DEBUG_TYPE_BINARY]; + sxe_log_t *log_str = &g_sxe_debug.log[DEBUG_TYPE_STRING]; + + INIT_LIST_HEAD(&g_sxe_debug.filter_file); + INIT_LIST_HEAD(&g_sxe_debug.filter_func); + +#ifdef SXE_CFG_RELEASE + g_sxe_debug.level = LEVEL_INFO; + g_sxe_debug.status = false; +#else + g_sxe_debug.level = LEVEL_DEBUG; + g_sxe_debug.status = true; +#endif + + g_sxe_debug.ctxt = alloc_percpu(sxe_ctxt_t); + if (!g_sxe_debug.ctxt) { + rc = -ENOMEM; + sxe_print(KERN_ERR, NULL, "alloc percpu failed\n"); + goto l_end; + } + + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe_debug.ctxt, i); + memset(ctxt, 0, sizeof(*ctxt)); + } + + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe_debug.ctxt, i); + nid = cpu_to_node(i); + + ctxt->page = alloc_pages_node(nid, GFP_ATOMIC, PAGE_ORDER); + if (!ctxt->page) { + rc = -ENOMEM; + sxe_print(KERN_ERR, NULL, "kmalloc size %lu failed\n", + PER_CPU_PAGE_SIZE); + goto l_free_cpu_buff; + } + ctxt->buff = page_address(ctxt->page); + } + + log_path_p = sxe_log_path_query(); + log_path_len = strlen(log_path_p); + if (log_path_p != NULL && log_path_p[0] == '/') { + if (log_path_p[log_path_len] == '/') { + sxe_log_file_prefix_add(is_vf, log_path_p); + } else { + sxe_log_file_prefix_add_default(is_vf, log_path_p); + } + } else { + sxe_log_file_path_set(is_vf); + } + if (input_log_file_size < DRV_LOG_FILE_SIZE_MIN_MB || + input_log_file_size > DRV_LOG_FILE_SIZE_MAX_MB) { + 
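+		/* configured log file size is out of the [DRV_LOG_FILE_SIZE_MIN_MB, DRV_LOG_FILE_SIZE_MAX_MB] range: fall back to the default LOG_FILE_SIZE, converted to MB */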
sxe_log_file_size_modify(LOG_FILE_SIZE >> MEGABYTE); + input_log_file_size = LOG_FILE_SIZE >> MEGABYTE; + } + if (input_log_space && input_log_space < input_log_file_size) { + sxe_log_file_size_modify(input_log_space); + input_log_file_size = input_log_space; + } + log_file_size = input_log_file_size << MEGABYTE; + + if (input_log_space) { + file_num = input_log_space / input_log_file_size; + if (file_num == 0) { + sxe_print(KERN_ERR, NULL, "filenum shouldnot be 0\n"); + SXE_BUG(); + } + } else { + file_num = 0; + } + + rc = sxe_klog_init( + log_str, + BUF_SIZE, + g_log_path_str, + log_file_size, + file_num); + if (rc < 0) { + goto l_free_cpu_buff; + } + + rc = sxe_klog_init( + log_bin, + BUF_SIZE, + g_log_path_bin, + BINARY_FILE_SIZE, + 0); + if (rc < 0) { + goto l_free_string; + } + + task = kthread_create(sxe_klog_flush, NULL, "sxe_klog_flush"); + if (IS_ERR(task)) { + rc = (int)PTR_ERR(task); + sxe_print(KERN_ERR, NULL, "Create kernel thread, err: %d\n", rc); + goto l_free_binary; + } + wake_up_process(task); + g_sxe_debug.task = task; + rc = 0; + sxe_print(KERN_INFO, NULL, "sxe debug init logpath[%s] strlogsize[%dM] filenum[%d]\n", + g_log_path_str, (log_file_size >> MEGABYTE), log_str->file_num); +l_end: + return rc; + +l_free_binary: + sxe_klog_exit(&g_sxe_debug.log[DEBUG_TYPE_BINARY]); + +l_free_string: + sxe_klog_exit(&g_sxe_debug.log[DEBUG_TYPE_STRING]); + +l_free_cpu_buff: + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe_debug.ctxt, i); + if (ctxt && ctxt->page) { + __free_page(ctxt->page); + } + } + free_percpu(g_sxe_debug.ctxt); + goto l_end; +} + +void sxe_log_exit(void) +{ + int i = 0; + sxe_ctxt_t *ctxt; + + if (g_sxe_debug.task == NULL) { + return; + } + + kthread_stop(g_sxe_debug.task); + + for (i = 0; i < ARRAY_SIZE(g_sxe_debug.log); i++) { + sxe_klog_exit(&g_sxe_debug.log[i]); + } + + if (g_sxe_debug.ctxt) { + for_each_possible_cpu(i) { + ctxt = per_cpu_ptr(g_sxe_debug.ctxt, i); + if (ctxt && ctxt->page) { + __free_page(ctxt->page); + } + } + + free_percpu(g_sxe_debug.ctxt); + g_sxe_debug.ctxt = NULL; + } +} + +#elif !defined SXE_DRIVER_RELEASE + +s32 g_sxe_log_level = LEVEL_INFO; +s32 g_sxe_bin_status = false; +char *test_bin_buf = NULL; + +s32 sxe_log_init(bool is_vf) +{ + return 0; +} + +void sxe_level_set(s32 level) +{ + g_sxe_log_level = level; +} + +s32 sxe_level_get(void) +{ + return g_sxe_log_level; +} + +void sxe_bin_status_set(bool status) +{ + g_sxe_bin_status = status; +} + +s32 sxe_bin_status_get(void) +{ + return g_sxe_bin_status; +} + +void sxe_log_sync(void) +{ +} + +void sxe_log_exit(void) +{ + if (test_bin_buf != NULL) { + free(test_bin_buf); + } +} + +void sxe_log_binary( + const char *file, + const char *func, + int line, + u8 *ptr, + u64 addr, + u32 size, + char *str) +{ +#define LINE_TOTAL 16 + u32 i = 0; + u32 j = 0; + u32 iMax; + u32 mod; + char *buf = NULL; + int len = 0; + + if (sxe_bin_status_get() != true) { + return; + } + + buf = zalloc(PER_CPU_PAGE_SIZE); + test_bin_buf = buf; + + iMax = size / LINE_TOTAL; + mod = size % LINE_TOTAL; + + len += snprintf(buf+len, PER_CPU_PAGE_SIZE - len, + "%s size:%d\n", str, size); + + for (i = 0; i < iMax; i++) { + j = i * LINE_TOTAL; + + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + + for (; j < (i + 1) * LINE_TOTAL; j++) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + } + len += snprintf(buf + len, PER_CPU_PAGE_SIZE -len, "%c", '\n'); + } + + if (mod) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, 
+ "0x%llx 0x%llx: ", + addr, (u64)&ptr[j]); + + for (; j < size; j++) { + len += snprintf(buf + len, PER_CPU_PAGE_SIZE - len, + "0x%02x%c ", ptr[j], ','); + } + + len += snprintf(buf + len, PER_CPU_PAGE_SIZE -len, "%c", '\n'); + } + + printf("buf:%s", buf); + + return; +} + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxevf/base/log/sxe_log.h b/drivers/net/ethernet/linkdata/sxevf/base/log/sxe_log.h new file mode 100644 index 000000000000..1712f9317937 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/log/sxe_log.h @@ -0,0 +1,425 @@ + +#ifndef _SXE_LOG_H_ +#define _SXE_LOG_H_ + +#include "sxe_log_types.h" + +#ifdef SXE_TEST +#define STATIC +#else +#define STATIC static +#endif + +#ifdef __cplusplus +extern "C"{ +#endif + +#define SXE_HOST(ins) (ins)->host->host_no + +#define LOG_INFO_PREFIX_LEN 32 +#define LOG_ERROR_PREFIX_LEN 33 +#define MEGABYTE 20 + +typedef enum { + LEVEL_ERROR, + LEVEL_WARN, + LEVEL_INFO, + LEVEL_DEBUG, +}debug_level_e; + +static inline const S8 *sxe_debug_level_name(debug_level_e lv) +{ + static const S8 *level[] = { + [LEVEL_ERROR] = "ERROR", + [LEVEL_WARN] = "WARN", + [LEVEL_INFO] = "INFO", + [LEVEL_DEBUG] = "DEBUG", + }; + + return level[lv]; +} + +#ifdef __KERNEL__ + +#define PRINT_DEBUG KERN_DEBUG +#define PRINT_INFO KERN_INFO +#define PRINT_WARN KERN_WARNING +#define PRINT_ERR KERN_ERR + +#define sxe_print(level,bdf, fmt,...) \ + printk(level"[SXE]%s():%d:" fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__) +#else + +#define PRINT_DEBUG LEVEL_DEBUG +#define PRINT_INFO LEVEL_INFO +#define PRINT_WARN LEVEL_WARN +#define PRINT_ERR LEVEL_ERROR + +#include +#include +#include +#include + +#define __percpu + +static inline U64 get_now_ms() { + struct timeval tv; + U64 timestamp = 0; + gettimeofday(&tv, NULL); + timestamp = tv.tv_sec * 1000 + tv.tv_usec/1000; + return timestamp; +} + +#define filename_printf(x) strrchr((x),'/')?strrchr((x),'/')+1:(x) + +#define sxe_print(level,bdf, fmt,...) do { \ + if (level <= sxe_level_get()) { \ + if (level == LEVEL_DEBUG) { \ + printf("DEBUG:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), filename_printf(__FILE__), \ + __FUNCTION__, __LINE__, pthread_self(), bdf ? bdf : "", ##__VA_ARGS__); \ + } else if (level == LEVEL_INFO) { \ + printf("INFO:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), filename_printf(__FILE__), \ + __FUNCTION__, __LINE__, pthread_self(), bdf ? bdf : "", ##__VA_ARGS__); \ + } else if (level == LEVEL_WARN) { \ + printf("WARN:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), filename_printf(__FILE__), \ + __FUNCTION__, __LINE__, pthread_self(), bdf ? bdf : "", ##__VA_ARGS__); \ + }else if (level == LEVEL_ERROR) { \ + printf("ERROR:%llu:%s:%s():%d:[%lu][%s];" fmt, get_now_ms(), filename_printf(__FILE__), \ + __FUNCTION__, __LINE__, pthread_self(), bdf ? bdf : "", ##__VA_ARGS__); \ + } \ + } \ +} while(0) + +#endif + +#define LOG_BUG_ON(cond, fmt, ...) 
do { \ + if((cond)) { \ + LOG_ERROR(fmt, ##__VA_ARGS__); \ + LOG_SYNC(); \ + BUG(); \ + } \ +}while(0) + +#define DEBUG_TRACE_MAGIC 0x456789 +#define BUF_SIZE (1024LL << 10) + +#define PAGE_ORDER 2 +#define PER_CPU_PAGE_SIZE (PAGE_SIZE * (1 << 2)) + +#define LOG_FILE_SIZE (200LL << 20) +#define BINARY_FILE_SIZE (200LL << 20) + +#define VF_LOG_FILE_PATH "/var/log/sxevf.log" +#define VF_LOG_FILE_PREFIX "sxevf.log" +#define VF_BINARY_FILE_PATH "/var/log/sxevf.bin" +#define VF_BINARY_FILE_PREFIX "sxevf.bin" + +#define LOG_FILE_PATH "/var/log/sxe.log" +#define LOG_FILE_PREFIX "sxe.log" +#define BINARY_FILE_PATH "/var/log/sxe.bin" +#define BINARY_FILE_PREFIX "sxe.bin" + +#define DEBUG_DROP_LOG_STRING "\nwarnning:drop some logs\n\n" + +enum { + DEBUG_TYPE_STRING, + DEBUG_TYPE_BINARY, + DEBUG_TYPE_NR, +}; + +typedef struct { + struct list_head list; + char name[64]; +} debug_func_t; + +typedef struct { + struct list_head list; + char name[64]; +} debug_file_t; + +typedef struct { + struct { + char *buf; + int buf_size; + long long head; + long long tail; + spinlock_t lock; + unsigned char is_drop; + }; + struct { + char *file_path; + struct file *file; + long long file_pos; + long long file_size; + U32 file_num; + U32 index; + }; +} sxe_log_t; + +typedef struct { + s32 magic; + char data[0]; +} sxe_thread_local_t; + +typedef struct { + struct page *page; + void *buff; +} sxe_ctxt_t; + +typedef struct { + s32 offset; +} sxe_thread_key_t; + +typedef struct { + debug_level_e level; + bool status; + u16 key_offset; + sxe_ctxt_t __percpu *ctxt; + struct list_head filter_func; + struct list_head filter_file; + struct task_struct *task; + sxe_log_t log[DEBUG_TYPE_NR]; +} sxe_debug_t; + +void sxe_level_set(int level); +s32 sxe_level_get(void); + +void sxe_bin_status_set(bool status); +s32 sxe_bin_status_get(void); + +int sxe_log_init(bool is_vf); +void sxe_log_exit(void); + +void sxe_log_string(debug_level_e level, const char *dev_name, const char *file, const char *func, + int line, const char *fmt, ...); + +void sxe_log_binary(const char *file, const char *func, int line, u8 *ptr, + u64 addr, u32 size, char *str); + +#define DATA_DUMP(ptr, size, str) \ + sxe_log_binary(__FILE__, __FUNCTION__, __LINE__, (u8*)ptr, 0, size, str) + +void sxe_log_sync(void); + +#ifdef SXE_DRIVER_TRACE +int time_for_file_name(char *buff, int buf_len); +int sxe_file_write(struct file *file, char *buf, int len); +#endif + +#if defined SXE_DRIVER_DEBUG && defined __KERNEL__ + +#define WRITE_LOG(level, bdf, fmt, ...) \ + sxe_log_string(level, bdf, __FILE__, __FUNCTION__, __LINE__, fmt, ##__VA_ARGS__) + +#define LOG_DEBUG(fmt, ...) WRITE_LOG(LEVEL_DEBUG, NULL, fmt, ##__VA_ARGS__) +#define LOG_INFO(fmt, ...) WRITE_LOG(LEVEL_INFO,NULL, fmt, ##__VA_ARGS__) +#define LOG_WARN(fmt, ...) WRITE_LOG(LEVEL_WARN, NULL, fmt, ##__VA_ARGS__) +#define LOG_ERROR(fmt, ...) WRITE_LOG(LEVEL_ERROR, NULL, fmt, ##__VA_ARGS__) + +#define LOG_DEBUG_BDF(fmt, ...) WRITE_LOG(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_INFO_BDF(fmt, ...) WRITE_LOG(LEVEL_INFO,adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_WARN_BDF(fmt, ...) WRITE_LOG(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_ERROR_BDF(fmt, ...) WRITE_LOG(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) + +#define LOG_SYNC() sxe_log_sync() + +#define LOG_DEV_DEBUG(format, arg...) \ + dev_dbg(&adapter->pdev->dev, format, ## arg); \ + LOG_DEBUG_BDF(format, ## arg) + +#define LOG_DEV_INFO(format, arg...) 
\ + dev_info(&adapter->pdev->dev, format, ## arg); \ + LOG_INFO_BDF(format, ## arg) + +#define LOG_DEV_WARN(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg); \ + LOG_WARN_BDF(format, ## arg) + +#define LOG_DEV_ERR(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg); \ + LOG_ERROR_BDF(format, ## arg) + +#define LOG_MSG_DEBUG(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_DEBUG_BDF(format, ## arg) + +#define LOG_MSG_INFO(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_INFO_BDF(format, ## arg) + +#define LOG_MSG_WARN(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_WARN_BDF(format, ## arg) + +#define LOG_MSG_ERR(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg); \ + LOG_ERROR_BDF(format, ## arg) + +#define LOG_PR_DEBUG(format, arg...) pr_debug("sxe: "format, ## arg); +#define LOG_PR_INFO(format, arg...) pr_info("sxe: "format, ## arg); +#define LOG_PR_WARN(format, arg...) pr_warn("sxe: "format, ## arg); +#define LOG_PR_ERR(format, arg...) pr_err("sxe: "format, ## arg); +#define LOG_PRVF_DEBUG(format, arg...) pr_debug("sxevf: "format, ## arg); +#define LOG_PRVF_INFO(format, arg...) pr_info("sxevf: "format, ## arg); +#define LOG_PRVF_WARN(format, arg...) pr_warn("sxevf: "format, ## arg); +#define LOG_PRVF_ERR(format, arg...) pr_err("sxevf: "format, ## arg); + +#else + +#if defined SXE_DRIVER_RELEASE + +#define LOG_DEBUG(fmt, ...) +#define LOG_INFO(fmt, ...) +#define LOG_WARN(fmt, ...) +#define LOG_ERROR(fmt, ...) + +#define UNUSED(x) (void)(x) + +#define LOG_DEBUG_BDF(fmt, ...) UNUSED(adapter) +#define LOG_INFO_BDF(fmt, ...) UNUSED(adapter) +#define LOG_WARN_BDF(fmt, ...) UNUSED(adapter) +#define LOG_ERROR_BDF(fmt, ...) UNUSED(adapter) + +#define LOG_DEV_DEBUG(format, arg...) \ + dev_dbg(&adapter->pdev->dev, format, ## arg); + +#define LOG_DEV_INFO(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg); + +#define LOG_DEV_WARN(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg); + +#define LOG_DEV_ERR(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg); + +#define LOG_MSG_DEBUG(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg); + +#define LOG_MSG_INFO(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg); + +#define LOG_MSG_WARN(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg); + +#define LOG_MSG_ERR(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg); + +#define LOG_PR_DEBUG(format, arg...) pr_debug("sxe: "format, ## arg); +#define LOG_PR_INFO(format, arg...) pr_info("sxe: "format, ## arg); +#define LOG_PR_WARN(format, arg...) pr_warn("sxe: "format, ## arg); +#define LOG_PR_ERR(format, arg...) pr_err("sxe: "format, ## arg); +#define LOG_PRVF_DEBUG(format, arg...) pr_debug("sxevf: "format, ## arg); +#define LOG_PRVF_INFO(format, arg...) pr_info("sxevf: "format, ## arg); +#define LOG_PRVF_WARN(format, arg...) pr_warn("sxevf: "format, ## arg); +#define LOG_PRVF_ERR(format, arg...) pr_err("sxevf: "format, ## arg); + +#else + +#define LOG_DEBUG(fmt, ...) sxe_print(PRINT_DEBUG, "", fmt, ##__VA_ARGS__) +#define LOG_INFO(fmt, ...) sxe_print(PRINT_INFO, "", fmt, ##__VA_ARGS__) +#define LOG_WARN(fmt, ...) sxe_print(PRINT_WARN, "", fmt, ##__VA_ARGS__) +#define LOG_ERROR(fmt, ...) 
sxe_print(PRINT_ERR, "", fmt, ##__VA_ARGS__) + +#define LOG_DEBUG_BDF(fmt, ...) sxe_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_INFO_BDF(fmt, ...) sxe_print(LEVEL_INFO,adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_WARN_BDF(fmt, ...) sxe_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_ERROR_BDF(fmt, ...) sxe_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) + +#define LOG_DEV_DEBUG(fmt, ...) \ + sxe_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_DEV_INFO(fmt, ...) \ + sxe_print(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_DEV_WARN(fmt, ...) \ + sxe_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_DEV_ERR(fmt, ...) \ + sxe_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) + +#define LOG_MSG_DEBUG(msglvl, fmt, ...) \ + sxe_print(LEVEL_DEBUG, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_MSG_INFO(msglvl, fmt, ...) \ + sxe_print(LEVEL_INFO, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_MSG_WARN(msglvl, fmt, ...) \ + sxe_print(LEVEL_WARN, adapter->dev_name, fmt, ##__VA_ARGS__) +#define LOG_MSG_ERR(msglvl, fmt, ...) \ + sxe_print(LEVEL_ERROR, adapter->dev_name, fmt, ##__VA_ARGS__) + +#define LOG_PR_DEBUG(fmt, ...) \ + sxe_print(PRINT_DEBUG, "sxe", fmt, ##__VA_ARGS__); + +#define LOG_PR_INFO(fmt, ...) \ + sxe_print(PRINT_INFO, "sxe", fmt, ##__VA_ARGS__); + +#define LOG_PR_WARN(fmt, ...) \ + sxe_print(PRINT_WARN, "sxe", fmt, ##__VA_ARGS__); + +#define LOG_PR_ERR(fmt, ...) \ + sxe_print(PRINT_ERR, "sxe", fmt, ##__VA_ARGS__); +#define LOG_PRVF_DEBUG(fmt, ...) \ + sxe_print(PRINT_DEBUG, "sxevf", fmt, ##__VA_ARGS__); + +#define LOG_PRVF_INFO(fmt, ...) \ + sxe_print(PRINT_INFO, "sxevf", fmt, ##__VA_ARGS__); + +#define LOG_PRVF_WARN(fmt, ...) \ + sxe_print(PRINT_WARN, "sxevf", fmt, ##__VA_ARGS__); + +#define LOG_PRVF_ERR(fmt, ...) 
\ + sxe_print(PRINT_ERR, "sxevf", fmt, ##__VA_ARGS__); + +#endif + +#define LOG_SYNC() + +#endif + +#if defined SXE_DRIVER_RELEASE +#define SXE_BUG_ON(cond) do { \ + if((cond)) { \ + printk(KERN_ERR "BUG_ON's condition(%s) has been triggered\n", #cond); \ + LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \ + } \ +}while(0) + +#define SXE_BUG() +#define SXE_BUG_ON_NO_SYNC(cond) do { \ + if((cond)) { \ + printk(KERN_ERR "BUG_ON's condition(%s) has been triggered\n", #cond); \ + LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \ + } \ +}while(0) + +#define SXE_BUG_NO_SYNC() +#else +#define SXE_BUG_ON(cond) do { \ + if((cond)) { \ + printk(KERN_ERR "BUG_ON's condition(%s) has been triggered\n", #cond); \ + LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \ + LOG_SYNC(); \ + } \ + BUG_ON(cond); \ +}while(0) + +#define SXE_BUG(void) do { \ + LOG_SYNC(); \ + BUG(void); \ +}while(0) + +#define SXE_BUG_ON_NO_SYNC(cond) do { \ + if((cond)) { \ + printk(KERN_ERR "BUG_ON's condition(%s) has been triggered\n", #cond); \ + LOG_ERROR("BUG_ON's condition(%s) has been triggered\n", #cond); \ + } \ + BUG_ON(cond); \ +}while(0) + +#define SXE_BUG_NO_SYNC(void) do { \ + BUG(void); \ +}while(0) + +#endif + +#ifdef __cplusplus +} +#endif +#endif + diff --git a/drivers/net/ethernet/linkdata/sxevf/base/log/sxe_log_types.h b/drivers/net/ethernet/linkdata/sxevf/base/log/sxe_log_types.h new file mode 100644 index 000000000000..d302e7160f55 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/log/sxe_log_types.h @@ -0,0 +1,94 @@ + +#ifndef __SXE_LOG_TYPES_H__ +#define __SXE_LOG_TYPES_H__ + +#ifdef __cplusplus +extern "C"{ +#endif + +#include + +typedef unsigned char U8; +typedef unsigned short U16; +typedef unsigned int U32; +typedef unsigned long ULong; +typedef unsigned long long U64; + +typedef char S8; +typedef short S16; +typedef int S32; +typedef long Long; +typedef long long S64; + +#define SXE_FALSE 0 +#define SXE_TRUE 1 + +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#ifndef likely +#define likely(x) __builtin_expect(!!(x), 1) +#endif + +#ifndef unlikely +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +#ifndef offsetof +#define offsetof(TYPE, MEMBER) ((size_t)(&((TYPE *)0)->MEMBER)) +#endif + + +#ifndef SXE_MIN +#define SXE_MIN(a, b) (((a) < (b)) ? (a) : (b)) +#endif + + +#ifndef SXE_MAX +#define SXE_MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif + + +#ifndef SXE_MIN_NON_ZERO +#define SXE_MIN_NON_ZERO(a, b) ((a) == 0 ? (b) : \ + ((b) == 0 ? 
(a) : (SXE_MIN(a, b)))) +#endif + +#ifndef TYPEOF +#ifdef __cplusplus +#define TYPEOF decltype +#else +#define TYPEOF typeof +#endif +#endif + +#ifndef container_of +#ifndef PCLINT +#define container_of(ptr, type, member) ({ \ + const TYPEOF( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) +#else +#define container_of(ptr, type, member) \ + ((type *)(void *)(char *)ptr) +#endif +#endif + +#ifndef SXE_DESC +#define SXE_DESC(a) 1 +#endif + + +#ifndef SXE_IN +#define SXE_IN +#endif + +#ifndef SXE_OUT +#define SXE_OUT +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/base/trace/sxe_trace.c b/drivers/net/ethernet/linkdata/sxevf/base/trace/sxe_trace.c new file mode 100644 index 000000000000..af20cb66462b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/base/trace/sxe_trace.c @@ -0,0 +1,226 @@ + +#ifdef SXE_DRIVER_TRACE + +#include +#include + +#include "sxe_trace.h" +#include "sxe_ring.h" +#include "sxe_log.h" + +#define SXE_FILE_NAME_LEN (256) +#define SXE_TRACE_NS_MASK (0xFFFFFFFF) + +#define SXE_TRACE_BUF_CLEAN(buf, buf_size, len) \ + memset(buf, 0, buf_size); \ + len = 0; + +struct sxe_trace_tx_ring g_sxe_trace_tx[SXE_TXRX_RING_NUM_MAX] = {{ 0 }}; +struct sxe_trace_rx_ring g_sxe_trace_rx[SXE_TXRX_RING_NUM_MAX] = {{ 0 }}; + +void sxe_file_close(struct file **file) +{ + filp_close(*file, NULL); + *file = NULL; +} + +void sxe_trace_tx_add(u8 ring_idx, enum sxe_trace_lab_tx lab) +{ + if (unlikely(ring_idx >= SXE_TXRX_RING_NUM_MAX) || unlikely(lab >= SXE_TRACE_LAB_TX_MAX)) { + return; + } + + if (unlikely(lab == 0)) { + g_sxe_trace_tx[ring_idx].next++; + g_sxe_trace_tx[ring_idx].next &= SXE_TRACE_PER_RING_MASK; + memset(&g_sxe_trace_tx[ring_idx].timestamp[g_sxe_trace_tx[ring_idx].next], 0, + sizeof(g_sxe_trace_tx[ring_idx].timestamp[0])); + } + + g_sxe_trace_tx[ring_idx].timestamp[g_sxe_trace_tx[ring_idx].next][lab] = ktime_get_real_ns() & SXE_TRACE_NS_MASK; +} + +void sxe_trace_rx_add(u8 ring_idx, enum sxe_trace_lab_rx lab) +{ + if (unlikely(ring_idx >= SXE_TXRX_RING_NUM_MAX) || unlikely(lab >= SXE_TRACE_LAB_RX_MAX)) { + return; + } + + if (unlikely(lab == 0)) { + g_sxe_trace_rx[ring_idx].next++; + g_sxe_trace_rx[ring_idx].next &= SXE_TRACE_PER_RING_MASK; + memset(&g_sxe_trace_rx[ring_idx].timestamp[g_sxe_trace_rx[ring_idx].next], 0, + sizeof(g_sxe_trace_rx[ring_idx].timestamp[0])); + } + + g_sxe_trace_rx[ring_idx].timestamp[g_sxe_trace_rx[ring_idx].next][lab] = ktime_get_real_ns() & SXE_TRACE_NS_MASK; +} + +static int sxe_trace_create_file(struct file **pp_file) +{ + char file_name[SXE_FILE_NAME_LEN] = {}; + int flags_new = O_CREAT | O_RDWR | O_APPEND | O_LARGEFILE; + int len = 0; + int rc = 0; + struct file *file; + + len += snprintf(file_name, sizeof(file_name), "%s.", SXE_TRACE_DUMP_FILE_NAME); + time_for_file_name(file_name + len, sizeof(file_name) - len); + + file = filp_open(file_name, flags_new, 0666); + if (IS_ERR(file)) { + rc = (int)PTR_ERR(file); + sxe_print(KERN_ERR, NULL, "open file:%s failed[errno:%d]\n", file_name, rc); + goto l_out; + } + *pp_file = file; + +l_out: + return rc; +} + +static int sxe_trace_write_file(struct file *file) +{ + char * buff; + size_t buff_size = 2048; + int rc = 0; + int len = 0; + u64 spend = 0; + u64 times = 0; + u64 spend_total = 0; + u64 times_total = 0; + u64 start; + u64 end; + u32 i; + u32 j; + u32 k; + + buff = kzalloc(buff_size, GFP_KERNEL); + if (buff == NULL) { + rc = -ENOMEM; + sxe_print(KERN_ERR, NULL, "kzalloc %lu failed.\n", 
buff_size); + goto l_out; + } + + len += snprintf(buff + len, buff_size - len, "tx trace dump:\n"); + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + for (i = 0; i < ARRAY_SIZE(g_sxe_trace_tx); i++) { + spend = 0; + times = 0; + for (j = 0; j < SXE_TRACE_NUM_PER_RING; j++) { + start = g_sxe_trace_tx[i].timestamp[j][SXE_TRACE_LAB_TX_START]; + end = g_sxe_trace_tx[i].timestamp[j][SXE_TRACE_LAB_TX_END]; + if (start == 0 || end == 0) { + continue; + } + SXE_TRACE_BUF_CLEAN(buff, buff_size, len); + len += snprintf(buff + len, buff_size - len, "\ttx ring %d trace %d dump:", i, j); + for (k = 0; k < SXE_TRACE_LAB_TX_MAX; k++) { + len += snprintf(buff + len, buff_size - len, "%llu ", g_sxe_trace_tx[i].timestamp[j][k]); + } + len += snprintf(buff + len, buff_size - len, "spend: %llu\n", end - start); + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + spend += end - start; + times++; + } + + SXE_TRACE_BUF_CLEAN(buff, buff_size, len); + len += snprintf(buff + len, buff_size - len, "tx ring %d, spend %llu, times:%llu.\n", i, spend, times); + spend_total += spend; + times_total += times; + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + } + + SXE_TRACE_BUF_CLEAN(buff, buff_size, len); + len += snprintf(buff + len, buff_size - len, "tx trace dump, spend_total: %llu, times_total: %llu.\n", + spend_total, times_total); + + len += snprintf(buff + len, buff_size - len, "rx trace dump:\n"); + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + spend_total = 0; + times_total = 0; + for (i = 0; i < ARRAY_SIZE(g_sxe_trace_rx); i++) { + spend = 0; + times = 0; + for (j = 0; j < SXE_TRACE_NUM_PER_RING; j++) { + start = g_sxe_trace_rx[i].timestamp[j][SXE_TRACE_LAB_RX_START]; + end = g_sxe_trace_rx[i].timestamp[j][SXE_TRACE_LAB_RX_END]; + if (start == 0 || end == 0) { + continue; + } + SXE_TRACE_BUF_CLEAN(buff, buff_size, len); + len += snprintf(buff + len, buff_size - len, "\trx ring %d trace %d dump:", i, j); + for (k = 0; k < SXE_TRACE_LAB_RX_MAX; k++) { + len += snprintf(buff + len, buff_size - len, "%llu ", g_sxe_trace_rx[i].timestamp[j][k]); + } + len += snprintf(buff + len, buff_size - len, "spend: %llu\n", end - start); + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + spend += end - start; + times++; + } + SXE_TRACE_BUF_CLEAN(buff, buff_size, len); + len += snprintf(buff + len, buff_size - len, "rx ring %d, spend %llu, times:%llu:\n", i, spend, times); + spend_total += spend; + times_total += times; + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + } + + SXE_TRACE_BUF_CLEAN(buff, buff_size, len); + len += snprintf(buff + len, buff_size - len, "rx trace dump, spend_total: %llu, times_total: %llu.\n", + spend_total, times_total); + rc = sxe_file_write(file, buff, len); + if (rc < 0) { + goto l_out; + } + +l_out: + if (buff) { + kfree(buff); + } + if (rc < 0) { + sxe_print(KERN_ERR, NULL, "write file error %d\n", rc); + } + return rc; +} + +void sxe_trace_dump(void) +{ + struct file *file; + int rc = 0; + + rc = sxe_trace_create_file(&file); + if (file == NULL) { + goto l_out; + } + + rc = sxe_trace_write_file(file); + if (rc < 0) { + goto l_out; + } + +l_out: + if (file) { + sxe_file_close(&file); + } + return; +} + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/base/trace/sxe_trace.h b/drivers/net/ethernet/linkdata/sxevf/base/trace/sxe_trace.h new file mode 100644 index 000000000000..fdeb450b5028 --- /dev/null +++ 
b/drivers/net/ethernet/linkdata/sxevf/base/trace/sxe_trace.h @@ -0,0 +1,77 @@ + +#ifndef __SXE_TRACE_H__ +#define __SXE_TRACE_H__ + +#ifdef SXE_DRIVER_TRACE + +#define SXE_TRACE_NUM_PER_RING (2048) +#define SXE_TRACE_PER_RING_MASK (0x7FF) + +#ifndef SXE_TEST +#define SXE_TRACE_DUMP_FILE_NAME ("/var/log/sxe_trace_dump.log") +#else +#define SXE_TRACE_DUMP_FILE_NAME (".sxe_trace_dump.log") +#endif + +enum sxe_trace_lab_tx { + SXE_TRACE_LAB_TX_START = 0, + SXE_TRACE_LAB_TX_MAY_STOP, + SXE_TRACE_LAB_TX_VLAN, + SXE_TRACE_LAB_TX_DCB, + SXE_TRACE_LAB_TX_IPSEC, + SXE_TRACE_LAB_TX_TSO, + SXE_TRACE_LAB_TX_DESC, + SXE_TRACE_LAB_TX_PPT, + SXE_TRACE_LAB_TX_FDIR, + SXE_TRACE_LAB_TX_OL_INFO, + SXE_TRACE_LAB_TX_MAP, + SXE_TRACE_LAB_TX_SENT, + SXE_TRACE_LAB_TX_UPDATE, + SXE_TRACE_LAB_TX_MAY_STOP_2, + SXE_TRACE_LAB_TX_WRITE, + SXE_TRACE_LAB_TX_END, + SXE_TRACE_LAB_TX_MAX, +}; + +struct sxe_trace_tx_ring { + u64 next; + u64 timestamp[SXE_TRACE_NUM_PER_RING][SXE_TRACE_LAB_TX_MAX]; +}; + +enum sxe_trace_lab_rx { + SXE_TRACE_LAB_RX_START = 0, + SXE_TRACE_LAB_RX_CLEAN, + SXE_TRACE_LAB_RX_UNMAP, + SXE_TRACE_LAB_RX_STATS, + SXE_TRACE_LAB_RX_HANG, + SXE_TRACE_LAB_RX_DONE, + SXE_TRACE_LAB_RX_WAKE, + SXE_TRACE_LAB_RX_END, + SXE_TRACE_LAB_RX_MAX, +}; + +struct sxe_trace_rx_ring { + u64 next; + u64 timestamp[SXE_TRACE_NUM_PER_RING][SXE_TRACE_LAB_RX_MAX]; +}; + +void sxe_trace_tx_add(u8 ring_idx, enum sxe_trace_lab_tx lab); + +void sxe_trace_rx_add(u8 ring_idx, enum sxe_trace_lab_rx lab); + +void sxe_trace_dump(void); + +#define SXE_TRACE_TX(r_idx, lab) \ + sxe_trace_tx_add(r_idx, lab) + +#define SXE_TRACE_RX(r_idx, lab) \ + sxe_trace_rx_add(r_idx, lab) + +#else +#define SXE_TRACE_TX(r_idx, lab) + +#define SXE_TRACE_RX(r_idx, lab) + +#endif +#endif + diff --git a/drivers/net/ethernet/linkdata/sxevf/include/drv_msg.h b/drivers/net/ethernet/linkdata/sxevf/include/drv_msg.h new file mode 100644 index 000000000000..027c88ebfc23 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/include/drv_msg.h @@ -0,0 +1,19 @@ + +#ifndef __DRV_MSG_H__ +#define __DRV_MSG_H__ + +#ifdef SXE_HOST_DRIVER +#include "sxe_drv_type.h" +#endif + +#define SXE_VERSION_LEN 32 + + + + + +typedef struct sxe_version_resp { + U8 fw_version[SXE_VERSION_LEN]; +}sxe_version_resp_s; + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/include/sxe/mgl/sxe_port.h b/drivers/net/ethernet/linkdata/sxevf/include/sxe/mgl/sxe_port.h new file mode 100644 index 000000000000..d2dfff6a5848 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/include/sxe/mgl/sxe_port.h @@ -0,0 +1,37 @@ +#ifndef __SXE_PORT_H__ +#define __SXE_PORT_H__ + +#if defined(__cplusplus) +extern "C" { +#endif + +#include "mgc_types.h" +#include "ps3_types.h" + +typedef enum MglPortCmdSetCode{ + MGL_CMD_PORT_SET_BASE = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 0), + MGL_CMD_PORT_SET_REG = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 1), + MGL_CMD_PORT_SET_LED = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 2), + MGL_CMD_SXE_SOC_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 3), + MGL_CMD_SXE_SFP_HTHRESHOLD = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 4), + MGL_CMD_SXE_SOC_RST = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 5), + MGL_CMD_SXE_SET_MFGINFO = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 6), + MGL_CMD_SXE_SET_INSIGHT = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 7), + MGL_CMD_SXE_OPT_INSIGHT = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_SET, 8), +} MglPortCmdSetCode_e; + 
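+/* MGL port command codes for the get direction, encoded with MGL_MK_LIMIT like the set codes above */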
+typedef enum MglPortCmdGetCode{ + MGL_CMD_SXE_GET_REG = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 0), + MGL_CMD_SXE_GET_SOC_INFO = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 1), + MGL_CMD_SXE_LOG_EXPORT = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 2), + MGL_CMD_SXE_REGS_DUMP = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 3), + MGL_CMD_SXE_GET_MFGINFO = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 4), + MGL_CMD_SXE_MAC_ADDR_GET = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 5), + MGL_CMD_SXE_GET_INSIGHT = MGL_MK_LIMIT(MGL_All_LIMIT, MGL_CMD_PORT, MGL_CMD_GET, 6), +} MglPortCmdGetCode_e; + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_cli.h b/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_cli.h new file mode 100644 index 000000000000..f11b3343afe8 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_cli.h @@ -0,0 +1,210 @@ + +#ifndef __SXE_CLI_H__ +#define __SXE_CLI_H__ + +#ifdef SXE_HOST_DRIVER +#include "sxe_drv_type.h" +#endif + +#define SXE_VERION_LEN (32) +#define SXE_MAC_NUM (128) +#define SXE_PORT_TRANSCEIVER_LEN (32) +#define SXE_PORT_VENDOR_LEN (32) +#define SXE_CHIP_TYPE_LEN (32) +#define SXE_VPD_SN_LEN (16) +#define SXE_SOC_RST_TIME (0x93A80) +#define SXE_SFP_TEMP_THRESHOLD_INTERVAL (3) +#define MGC_TERMLOG_INFO_MAX_LEN (12 * 1024) +#define SXE_REGS_DUMP_MAX_LEN (12 * 1024) +#define SXE_PRODUCT_NAME_LEN (32) + +typedef enum sxe_led_mode { + SXE_IDENTIFY_LED_BLINK_ON = 0, + SXE_IDENTIFY_LED_BLINK_OFF, + SXE_IDENTIFY_LED_ON, + SXE_IDENTIFY_LED_OFF, + SXE_IDENTIFY_LED_RESET, +} sxe_led_mode_s; + +typedef struct sxe_led_ctrl { + U32 mode; + U32 duration; + +} sxe_led_ctrl_s; + +typedef struct sxe_led_ctrl_resp { + U32 ack; +} sxe_led_ctrl_resp_s; + +typedef enum PortLinkSpeed { + PORT_LINK_NO = 0, + PORT_LINK_100M = 1, + PORT_LINK_1G = 2, + PORT_LINK_10G = 3, +} PortLinkSpeed_e; + +typedef struct SysSocInfo { + S8 fwVer[SXE_VERION_LEN]; + S8 optVer[SXE_VERION_LEN]; + U8 socStatus; + U8 pad[3]; + S32 socTemp; + U64 chipId; + S8 chipType[SXE_CHIP_TYPE_LEN]; + S8 pba[SXE_VPD_SN_LEN]; + S8 productName[SXE_PRODUCT_NAME_LEN]; +} SysSocInfo_s; + +typedef struct SysPortInfo { + U64 mac[SXE_MAC_NUM]; + U8 isPortAbs; + U8 linkStat; + U8 linkSpeed; + + + U8 isSfp:1; + U8 isGetInfo:1; + U8 rvd:6; + S8 opticalModTemp; + U8 pad[3]; + S8 transceiverType[SXE_PORT_TRANSCEIVER_LEN]; + S8 vendorName[SXE_PORT_VENDOR_LEN]; + S8 vendorPn[SXE_PORT_VENDOR_LEN]; +} SysPortInfo_s; + +typedef struct SysInfoResp { + SysSocInfo_s socInfo; + SysPortInfo_s portInfo; +} SysInfoResp_s; + +typedef enum SfpTempTdMode { + SFP_TEMP_THRESHOLD_MODE_ALARM = 0, + SFP_TEMP_THRESHOLD_MODE_WARN, +} SfpTempTdMode_e; + +typedef struct SfpTempTdSet{ + U8 mode; + U8 pad[3]; + S8 hthreshold; + S8 lthreshold; +} SfpTempTdSet_s; + +typedef struct SxeLogExportResp { + U16 curLogLen; + U8 isEnd; + U8 pad; + S32 sessionId; + S8 data[0]; +} SxeLogExportResp_s; + +typedef enum SxeLogExportType { + SXE_LOG_EXPORT_REQ = 0, + SXE_LOG_EXPORT_FIN, + SXE_LOG_EXPORT_ABORT, +} SxeLogExportType_e; + +typedef struct SxeLogExportReq { + U8 isALLlog; + U8 cmdtype; + U8 isBegin; + U8 pad; + S32 sessionId; + U32 logLen; +} SxeLogExportReq_s; + +typedef struct SocRstReq { + U32 time; +} SocRstReq_s; + +typedef struct RegsDumpResp { + U32 curdwLen; + U8 data[0]; +} RegsDumpResp_s; + +enum { + SXE_MFG_PART_NUMBER_LEN = 8, + SXE_MFG_SERIAL_NUMBER_LEN = 16, + SXE_MFG_REVISION_LEN = 4, + 
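+	/* OEM string and board identification field lengths, plus the MAC address count */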
SXE_MFG_OEM_STR_LEN = 64, + SXE_MFG_SXE_BOARD_ASSEMBLY_LEN = 32, + SXE_MFG_SXE_BOARD_TRACE_NUM_LEN = 16, + SXE_MFG_SXE_MAC_ADDR_CNT = 2, +}; + +typedef struct sxeMfgInfo { + U8 partNumber[SXE_MFG_PART_NUMBER_LEN]; + U8 serialNumber [SXE_MFG_SERIAL_NUMBER_LEN]; + U32 mfgDate; + U8 revision[SXE_MFG_REVISION_LEN]; + U32 reworkDate; + U8 pad[4]; + U64 macAddr[SXE_MFG_SXE_MAC_ADDR_CNT]; + U8 boardTraceNum[SXE_MFG_SXE_BOARD_TRACE_NUM_LEN]; + U8 boardAssembly[SXE_MFG_SXE_BOARD_ASSEMBLY_LEN]; + U8 extra1[SXE_MFG_OEM_STR_LEN]; + U8 extra2[SXE_MFG_OEM_STR_LEN]; +} sxeMfgInfo_t; + +typedef struct RegsDumpReq { + U32 baseAddr; + U32 dwLen; +} RegsDumpReq_s; + +typedef enum sxe_pcs_mode { + SXE_PCS_MODE_1000BASE_KX_WO = 0, + SXE_PCS_MODE_1000BASE_KX_W, + SXE_PCS_MODE_SGMII, + SXE_PCS_MODE_10GBASE_KR_WO, + SXE_PCS_MODE_AUTO_NEGT_73, + SXE_PCS_MODE_LPBK_PHY_TX2RX, + SXE_PCS_MODE_LPBK_PHY_RX2TX, + SXE_PCS_MODE_LPBK_PCS_RX2TX, + SXE_PCS_MODE_BUTT, +} sxe_pcs_mode_e; + +typedef enum sxe_remote_fault_mode { + SXE_REMOTE_FALUT_NO_ERROR = 0, + SXE_REMOTE_FALUT_OFFLINE, + SXE_REMOTE_FALUT_LINK_FAILURE, + SXE_REMOTE_FALUT_AUTO_NEGOTIATION, + SXE_REMOTE_UNKNOWN, +} sxe_remote_fault_e; + +typedef struct sxe_phy_cfg { + sxe_pcs_mode_e mode; + U32 mtu; +} sxe_pcs_cfg_s; + +typedef enum sxe_an_speed { + SXE_AN_SPEED_NO_LINK = 0, + SXE_AN_SPEED_100M, + SXE_AN_SPEED_1G, + SXE_AN_SPEED_10G, + SXE_AN_SPEED_UNKNOWN, +} sxe_an_speed_e; + +typedef enum sxe_phy_pause_cap { + SXE_PAUSE_CAP_NO_PAUSE = 0, + SXE_PAUSE_CAP_ASYMMETRIC_PAUSE, + SXE_PAUSE_CAP_SYMMETRIC_PAUSE, + SXE_PAUSE_CAP_BOTH_PAUSE, + SXE_PAUSE_CAP_UNKNOWN, +} sxe_phy_pause_cap_e; + +typedef enum sxe_phy_duplex_type { + SXE_FULL_DUPLEX = 0, + SXE_HALF_DUPLEX = 1, + SXE_UNKNOWN_DUPLEX, +} sxe_phy_duplex_type_e; + +typedef struct sxe_phy_an_cap { + sxe_remote_fault_e remote_fault; + sxe_phy_pause_cap_e pause_cap; + sxe_phy_duplex_type_e duplex_cap; +} sxe_phy_an_cap_s; + +typedef struct sxe_an_cap { + sxe_phy_an_cap_s local; + sxe_phy_an_cap_s peer; +} sxe_an_cap_s; +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_hdc.h b/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_hdc.h new file mode 100644 index 000000000000..0df074aa01c0 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_hdc.h @@ -0,0 +1,40 @@ + +#ifndef __SXE_HDC_H__ +#define __SXE_HDC_H__ + +#ifdef SXE_HOST_DRIVER +#include "sxe_drv_type.h" +#endif + +#define HDC_CACHE_TOTAL_LEN (16 *1024) +#define ONE_PACKET_LEN_MAX (1024) +#define DWORD_NUM (256) +#define HDC_TRANS_RETRY_COUNT (3) + + +typedef enum SxeHdcErrnoCode { + PKG_OK = 0, + PKG_ERR_REQ_LEN, + PKG_ERR_RESP_LEN, + PKG_ERR_PKG_SKIP, + PKG_ERR_NODATA, + PKG_ERR_PF_LK, + PKG_ERR_OTHER, +} SxeHdcErrnoCode_e; + +typedef union HdcHeader { + struct { + U8 pid:4; + U8 errCode:4; + U8 len; + U16 startPkg:1; + U16 endPkg:1; + U16 isRd:1; + U16 msi:1; + U16 totalLen:12; + } head; + U32 dw0; +} HdcHeader_u; + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_ioctl.h b/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_ioctl.h new file mode 100644 index 000000000000..88e59b2cc658 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_ioctl.h @@ -0,0 +1,18 @@ +#ifndef _SXE_IOCTL_H_ +#define _SXE_IOCTL_H_ + +#ifdef SXE_HOST_DRIVER +#include "sxe_drv_type.h" +#endif + +struct SxeIoctlSyncCmd { + U64 traceid; + void *inData; + U32 inLen; + void *outData; + U32 outLen; +}; + +#define SXE_CMD_IOCTL_SYNC_CMD _IOWR('M', 1, struct SxeIoctlSyncCmd) + +#endif diff --git 
a/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_msg.h b/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_msg.h new file mode 100644 index 000000000000..113a9136c27a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_msg.h @@ -0,0 +1,136 @@ + +#ifndef __SXE_MSG_H__ +#define __SXE_MSG_H__ + +#ifdef SXE_HOST_DRIVER +#include "sxe_drv_type.h" +#endif + +#define SXE_MAC_ADDR_LEN 6 + +#define SXE_HDC_CMD_HDR_SIZE sizeof(struct sxe_hdc_cmd_hdr) +#define SXE_HDC_MSG_HDR_SIZE sizeof(struct sxe_hdc_drv_cmd_msg) + +enum sxe_cmd_type { + SXE_CMD_TYPE_CLI, + SXE_CMD_TYPE_DRV, + SXE_CMD_TYPE_UNKOWN, +}; + +typedef struct sxe_hdc_cmd_hdr { + U8 cmd_type; + U8 cmd_sub_type; + U8 reserve[6]; +}sxe_hdc_cmd_hdr_s; + + + +typedef enum SxeFWState { + SXE_FW_START_STATE_UNDEFINED = 0x00, + SXE_FW_START_STATE_INIT_BASE = 0x10, + SXE_FW_START_STATE_SCAN_DEVICE = 0x20, + SXE_FW_START_STATE_FINISHED = 0x30, + SXE_FW_START_STATE_UPGRADE = 0x31, + SXE_FW_RUNNING_STATE_ABNOMAL = 0x40, + SXE_FW_START_STATE_MASK = 0xF0, +}SxeFWState_e; + +typedef struct SxeFWStateInfo { + U8 socStatus; + char statBuff[32]; +} SxeFWStateInfo_s; + + +typedef enum MsiEvt { + MSI_EVT_SOC_STATUS = 0x1, + MSI_EVT_HDC_FWOV = 0x2, + MSI_EVT_HDC_TIME_SYNC = 0x4, + + MSI_EVT_MAX = 0x80000000, +} MsiEvt_u; + + +typedef enum SxeFwHdcState { + SXE_FW_HDC_TRANSACTION_IDLE = 0x01, + SXE_FW_HDC_TRANSACTION_BUSY, + + SXE_FW_HDC_TRANSACTION_ERR, +} SxeFwHdcState_e; + +enum sxe_hdc_cmd_opcode { + SXE_CMD_SET_WOL = 1, + SXE_CMD_LED_CTRL, + SXE_CMD_SFP_READ, + SXE_CMD_SFP_WRITE, + SXE_CMD_TX_DIS_CTRL = 5, + SXE_CMD_TINE_SYNC, + SXE_CMD_RATE_SELECT, + SXE_CMD_R0_MAC_GET, + SXE_CMD_LOG_EXPORT, + SXE_CMD_FW_VER_GET = 10, + SXE_CMD_PCS_SDS_INIT, + SXE_CMD_AN_SPEED_GET, + SXE_CMD_AN_CAP_GET, + SXE_CMD_GET_SOC_INFO, + SXE_CMD_MNG_RST = 15, + + SXE_CMD_MAX, +}; + +enum sxe_hdc_cmd_errcode { + SXE_ERR_INVALID_PARAM = 1, +}; + +typedef struct sxe_hdc_drv_cmd_msg { + + U16 opcode; + U16 errcode; + union dataLength { + U16 req_len; + U16 ack_len; + } length; + U8 reserve[8]; + U64 traceid; + U8 body[0]; +} sxe_hdc_drv_cmd_msg_s; + + +typedef struct sxe_sfp_rw_req { + U16 offset; + U16 len; + U8 write_data[0]; +} sxe_sfp_rw_req_s; + + +typedef struct sxe_sfp_read_resp { + U16 len; + U8 resp[0]; +} sxe_sfp_read_resp_s; + +typedef enum sxe_sfp_rate{ + SXE_SFP_RATE_1G = 0, + SXE_SFP_RATE_10G = 1, +} sxe_sfp_rate_e; + + +typedef struct sxe_sfp_rate_able { + sxe_sfp_rate_e rate; +} sxe_sfp_rate_able_s; + + +typedef struct sxe_spp_tx_able { + BOOL isDisable; +} sxe_spp_tx_able_s; + + +typedef struct sxe_default_mac_addr_resp { + U8 addr[SXE_MAC_ADDR_LEN]; +} sxe_default_mac_addr_resp_s; + + +typedef struct sxe_mng_rst { + BOOL enable; +} sxe_mng_rst_s; + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_regs.h b/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_regs.h new file mode 100644 index 000000000000..aa68a0047f7e --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/include/sxe/sxe_regs.h @@ -0,0 +1,1273 @@ + +#ifndef __SXE_REGS_H__ +#define __SXE_REGS_H__ + +#define SXE_LINKSEC_MAX_SC_COUNT 1 +#define SXE_LINKSEC_MAX_SA_COUNT 2 + +#define SXE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 + + +#define SXE_REG_READ_FAIL 0xffffffffU +#define SXE_REG_READ_RETRY 5 +#ifdef SXE_TEST +#define SXE_PCI_MASTER_DISABLE_TIMEOUT (1) +#else +#define SXE_PCI_MASTER_DISABLE_TIMEOUT (800) +#endif + + +#define SXE_CTRL 0x00000 +#define SXE_STATUS 0x00008 +#define SXE_CTRL_EXT 0x00018 + + +#define SXE_CTRL_LNK_RST 0x00000008 +#define 
SXE_CTRL_RST 0x04000000 + +#ifdef SXE_TEST +#define SXE_CTRL_RST_MASK (0) +#define SXE_CTRL_GIO_DIS (0) +#else +#define SXE_CTRL_RST_MASK (SXE_CTRL_LNK_RST | SXE_CTRL_RST) +#define SXE_CTRL_GIO_DIS 0x00000004 +#endif + + +#define SXE_STATUS_GIO 0x00080000 + + +#define SXE_CTRL_EXT_PFRSTD 0x00004000 +#define SXE_CTRL_EXT_NS_DIS 0x00010000 +#define SXE_CTRL_EXT_DRV_LOAD 0x10000000 + + +#define SXE_FCRTL(_i) (0x03220 + ((_i) * 4)) +#define SXE_FCRTH(_i) (0x03260 + ((_i) * 4)) +#define SXE_FCCFG 0x03D00 + + +#define SXE_FCRTL_XONE 0x80000000 +#define SXE_FCRTH_FCEN 0x80000000 + +#define SXE_FCCFG_TFCE_802_3X 0x00000008 +#define SXE_FCCFG_TFCE_PRIORITY 0x00000010 + + +#define SXE_GCR_EXT 0x11050 + + +#define SXE_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define SXE_GCR_CMPL_TMOUT_10ms 0x00001000 +#define SXE_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define SXE_GCR_CAP_VER2 0x00040000 +#define SXE_GCR_EXT_MSIX_EN 0x80000000 +#define SXE_GCR_EXT_BUFFERS_CLEAR 0x40000000 +#define SXE_GCR_EXT_VT_MODE_16 0x00000001 +#define SXE_GCR_EXT_VT_MODE_32 0x00000002 +#define SXE_GCR_EXT_VT_MODE_64 0x00000003 +#define SXE_GCR_EXT_VT_MODE_MASK 0x00000003 +#define SXE_GCR_EXT_SRIOV (SXE_GCR_EXT_MSIX_EN | \ + SXE_GCR_EXT_VT_MODE_64) + +#define SXE_PCI_DEVICE_STATUS 0x7A +#define SXE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define SXE_PCI_LINK_STATUS 0x82 +#define SXE_PCI_DEVICE_CONTROL2 0x98 +#define SXE_PCI_LINK_WIDTH 0x3F0 +#define SXE_PCI_LINK_WIDTH_1 0x10 +#define SXE_PCI_LINK_WIDTH_2 0x20 +#define SXE_PCI_LINK_WIDTH_4 0x40 +#define SXE_PCI_LINK_WIDTH_8 0x80 +#define SXE_PCI_LINK_SPEED 0xF +#define SXE_PCI_LINK_SPEED_2500 0x1 +#define SXE_PCI_LINK_SPEED_5000 0x2 +#define SXE_PCI_LINK_SPEED_8000 0x3 +#define SXE_PCI_HEADER_TYPE_REGISTER 0x0E +#define SXE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define SXE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +#define SXE_PCIDEVCTRL2_TIMEO_MASK 0xf +#define SXE_PCIDEVCTRL2_16_32ms_def 0x0 +#define SXE_PCIDEVCTRL2_50_100us 0x1 +#define SXE_PCIDEVCTRL2_1_2ms 0x2 +#define SXE_PCIDEVCTRL2_16_32ms 0x5 +#define SXE_PCIDEVCTRL2_65_130ms 0x6 +#define SXE_PCIDEVCTRL2_260_520ms 0x9 +#define SXE_PCIDEVCTRL2_1_2s 0xa +#define SXE_PCIDEVCTRL2_4_8s 0xd +#define SXE_PCIDEVCTRL2_17_34s 0xe + + +#define SXE_EICR 0x00800 +#define SXE_EICS 0x00808 +#define SXE_EIMS 0x00880 +#define SXE_EIMC 0x00888 +#define SXE_EIAC 0x00810 +#define SXE_EIAM 0x00890 +#define SXE_EITRSEL 0x00894 +#define SXE_GPIE 0x00898 +#define SXE_IVAR(i) (0x00900 + (i) * 4) +#define SXE_IVAR_MISC 0x00A00 +#define SXE_EICS_EX(i) (0x00A90 + (i) * 4) +#define SXE_EIMS_EX(i) (0x00AA0 + (i) * 4) +#define SXE_EIMC_EX(i) (0x00AB0 + (i) * 4) +#define SXE_EIAM_EX(i) (0x00AD0 + (i) * 4) +#define SXE_EITR(i) (((i) <= 23) ? 
(0x00820 + ((i) * 4)) : \ + (0x012300 + (((i) - 24) * 4))) + +#define SXE_SPP_PROC 0x00AD8 +#define SXE_SPP_STATE 0x00AF4 + + + +#define SXE_EICR_RTX_QUEUE 0x0000FFFF +#define SXE_EICR_FLOW_NAV 0x00010000 +#define SXE_EICR_MAILBOX 0x00080000 +#define SXE_EICR_LSC 0x00100000 +#define SXE_EICR_LINKSEC 0x00200000 +#define SXE_EICR_ECC 0x10000000 +#define SXE_EICR_HDC 0x20000000 +#define SXE_EICR_TCP_TIMER 0x40000000 +#define SXE_EICR_OTHER 0x80000000 + + +#define SXE_EICS_RTX_QUEUE SXE_EICR_RTX_QUEUE +#define SXE_EICS_FLOW_NAV SXE_EICR_FLOW_NAV +#define SXE_EICS_MAILBOX SXE_EICR_MAILBOX +#define SXE_EICS_LSC SXE_EICR_LSC +#define SXE_EICS_ECC SXE_EICR_ECC +#define SXE_EICS_HDC SXE_EICR_HDC +#define SXE_EICS_TCP_TIMER SXE_EICR_TCP_TIMER +#define SXE_EICS_OTHER SXE_EICR_OTHER + + +#define SXE_EIMS_RTX_QUEUE SXE_EICR_RTX_QUEUE +#define SXE_EIMS_FLOW_NAV SXE_EICR_FLOW_NAV +#define SXE_EIMS_MAILBOX SXE_EICR_MAILBOX +#define SXE_EIMS_LSC SXE_EICR_LSC +#define SXE_EIMS_ECC SXE_EICR_ECC +#define SXE_EIMS_HDC SXE_EICR_HDC +#define SXE_EIMS_TCP_TIMER SXE_EICR_TCP_TIMER +#define SXE_EIMS_OTHER SXE_EICR_OTHER +#define SXE_EIMS_ENABLE_MASK (SXE_EIMS_RTX_QUEUE | SXE_EIMS_LSC | \ + SXE_EIMS_TCP_TIMER | SXE_EIMS_OTHER) + +#define SXE_EIMC_FLOW_NAV SXE_EICR_FLOW_NAV +#define SXE_EIMC_LSC SXE_EICR_LSC +#define SXE_EIMC_HDC SXE_EICR_HDC + + +#define SXE_GPIE_SPP0_EN 0x00000001 +#define SXE_GPIE_SPP1_EN 0x00000002 +#define SXE_GPIE_SPP2_EN 0x00000004 +#define SXE_GPIE_MSIX_MODE 0x00000010 +#define SXE_GPIE_OCD 0x00000020 +#define SXE_GPIE_EIMEN 0x00000040 +#define SXE_GPIE_EIAME 0x40000000 +#define SXE_GPIE_PBA_SUPPORT 0x80000000 +#define SXE_GPIE_VTMODE_MASK 0x0000C000 +#define SXE_GPIE_VTMODE_16 0x00004000 +#define SXE_GPIE_VTMODE_32 0x00008000 +#define SXE_GPIE_VTMODE_64 0x0000C000 + + +#define SXE_IVAR_ALLOC_VALID 0x80 + + +#define SXE_EITR_CNT_WDIS 0x80000000 +#define SXE_EITR_ITR_MASK 0x00000FF8 +#define SXE_EITR_ITR_SHIFT 2 +#define SXE_EITR_ITR_MAX (SXE_EITR_ITR_MASK >> SXE_EITR_ITR_SHIFT) + + +#define SXE_EICR_GPI_SPP0 0x01000000 +#define SXE_EICR_GPI_SPP1 0x02000000 +#define SXE_EICR_GPI_SPP2 0x04000000 +#define SXE_EIMS_GPI_SPP0 SXE_EICR_GPI_SPP0 +#define SXE_EIMS_GPI_SPP1 SXE_EICR_GPI_SPP1 +#define SXE_EIMS_GPI_SPP2 SXE_EICR_GPI_SPP2 + + +#define SXE_SPP_PROC_SPP2_TRIGGER 0x00300000 +#define SXE_SPP_PROC_SPP2_TRIGGER_MASK 0xFFCFFFFF +#define SXE_SPP_PROC_DELAY_US_MASK 0x0000FFFF +#define SXE_SPP_PROC_DELAY_US 0x00000007 + + +#define SXE_IRQ_CLEAR_MASK 0xFFFFFFFF + + +#define SXE_RXCSUM 0x05000 +#define SXE_RFCTL 0x05008 +#define SXE_FCTRL 0x05080 +#define SXE_EXVET 0x05078 +#define SXE_VLNCTRL 0x05088 +#define SXE_MCSTCTRL 0x05090 +#define SXE_ETQF(_i) (0x05128 + ((_i) * 4)) +#define SXE_ETQS(_i) (0x0EC00 + ((_i) * 4)) +#define SXE_SYNQF 0x0EC30 +#define SXE_MTA(_i) (0x05200 + ((_i) * 4)) +#define SXE_UTA(_i) (0x0F400 + ((_i) * 4)) +#define SXE_VFTA(_i) (0x0A000 + ((_i) * 4)) +#define SXE_RAL(_i) (0x0A200 + ((_i) * 8)) +#define SXE_RAH(_i) (0x0A204 + ((_i) * 8)) +#define SXE_MPSAR_LOW(_i) (0x0A600 + ((_i) * 8)) +#define SXE_MPSAR_HIGH(_i) (0x0A604 + ((_i) * 8)) +#define SXE_PSRTYPE(_i) (0x0EA00 + ((_i) * 4)) +#define SXE_RETA(_i) (0x0EB00 + ((_i) * 4)) +#define SXE_RSSRK(_i) (0x0EB80 + ((_i) * 4)) +#define SXE_RQTC 0x0EC70 +#define SXE_MRQC 0x0EC80 +#define SXE_IEOI 0x0F654 +#define SXE_PL 0x0F658 +#define SXE_LPL 0x0F65C + + +#define SXE_ETQF_CNT 8 +#define SXE_MTA_CNT 128 +#define SXE_UTA_CNT 128 +#define SXE_VFTA_CNT 128 +#define SXE_RAR_CNT 128 +#define SXE_MPSAR_CNT 128 + + +#define 
SXE_EXVET_DEFAULT 0x81000000 +#define SXE_VLNCTRL_DEFAULT 0x8100 +#define SXE_IEOI_DEFAULT 0x060005DC +#define SXE_PL_DEFAULT 0x3e000016 +#define SXE_LPL_DEFAULT 0x26000000 + + +#define SXE_RXCSUM_IPPCSE 0x00001000 +#define SXE_RXCSUM_PCSD 0x00002000 + + +#define SXE_RFCTL_LRO_DIS 0x00000020 +#define SXE_RFCTL_NFSW_DIS 0x00000040 +#define SXE_RFCTL_NFSR_DIS 0x00000080 + + +#define SXE_FCTRL_SBP 0x00000002 +#define SXE_FCTRL_MPE 0x00000100 +#define SXE_FCTRL_UPE 0x00000200 +#define SXE_FCTRL_BAM 0x00000400 +#define SXE_FCTRL_PMCF 0x00001000 +#define SXE_FCTRL_DPF 0x00002000 + + +#define SXE_VLNCTRL_VET 0x0000FFFF +#define SXE_VLNCTRL_CFI 0x10000000 +#define SXE_VLNCTRL_CFIEN 0x20000000 +#define SXE_VLNCTRL_VFE 0x40000000 +#define SXE_VLNCTRL_VME 0x80000000 + +#define SXE_EXVET_VET_EXT_SHIFT 16 +#define SXE_EXTENDED_VLAN (1 << 26) + + +#define SXE_MCSTCTRL_MFE 4 + +#define SXE_ETQF_FILTER_EAPOL 0 +#define SXE_ETQF_FILTER_1588 3 +#define SXE_ETQF_FILTER_FIP 4 +#define SXE_ETQF_FILTER_LLDP 5 +#define SXE_ETQF_FILTER_LACP 6 +#define SXE_ETQF_FILTER_FC 7 +#define SXE_MAX_ETQF_FILTERS 8 +#define SXE_ETQF_1588 0x40000000 +#define SXE_ETQF_FILTER_EN 0x80000000 +#define SXE_ETQF_POOL_ENABLE BIT(26) +#define SXE_ETQF_POOL_SHIFT 20 + + +#define SXE_ETQS_RX_QUEUE 0x007F0000 +#define SXE_ETQS_RX_QUEUE_SHIFT 16 +#define SXE_ETQS_LLI 0x20000000 +#define SXE_ETQS_QUEUE_EN 0x80000000 + + +#define SXE_SYN_FILTER_ENABLE 0x00000001 +#define SXE_SYN_FILTER_QUEUE 0x000000FE +#define SXE_SYN_FILTER_QUEUE_SHIFT 1 +#define SXE_SYN_FILTER_SYNQFP 0x80000000 + + +#define SXE_RAH_VIND_MASK 0x003C0000 +#define SXE_RAH_VIND_SHIFT 18 +#define SXE_RAH_AV 0x80000000 +#define SXE_CLEAR_VMDQ_ALL 0xFFFFFFFF + + +#define SXE_PSRTYPE_TCPHDR 0x00000010 +#define SXE_PSRTYPE_UDPHDR 0x00000020 +#define SXE_PSRTYPE_IPV4HDR 0x00000100 +#define SXE_PSRTYPE_IPV6HDR 0x00000200 +#define SXE_PSRTYPE_L2HDR 0x00001000 + + +#define SXE_MRQC_RSSEN 0x00000001 +#define SXE_MRQC_MRQE_MASK 0xF +#define SXE_MRQC_RT8TCEN 0x00000002 +#define SXE_MRQC_RT4TCEN 0x00000003 +#define SXE_MRQC_RTRSS8TCEN 0x00000004 +#define SXE_MRQC_RTRSS4TCEN 0x00000005 +#define SXE_MRQC_VMDQEN 0x00000008 +#define SXE_MRQC_VMDQRSS32EN 0x0000000A +#define SXE_MRQC_VMDQRSS64EN 0x0000000B +#define SXE_MRQC_VMDQRT8TCEN 0x0000000C +#define SXE_MRQC_VMDQRT4TCEN 0x0000000D +#define SXE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define SXE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define SXE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define SXE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +#define SXE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define SXE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define SXE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define SXE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define SXE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define SXE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 + + +#define SXE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ + (0x0D000 + (((_i) - 64) * 0x40))) +#define SXE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ + (0x0D004 + (((_i) - 64) * 0x40))) +#define SXE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ + (0x0D008 + (((_i) - 64) * 0x40))) +#define SXE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ + (0x0D010 + (((_i) - 64) * 0x40))) +#define SXE_SRRCTL(_i) (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ + (0x0D014 + (((_i) - 64) * 0x40))) +#define SXE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ + (0x0D018 + (((_i) - 64) * 0x40))) +#define SXE_RXDCTL(_i) (((_i) < 64) ? 
(0x01028 + ((_i) * 0x40)) : \ + (0x0D028 + (((_i) - 64) * 0x40))) +#define SXE_LROCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ + (0x0D02C + (((_i) - 64) * 0x40))) +#define SXE_RDRXCTL 0x02F00 +#define SXE_RXCTRL 0x03000 +#define SXE_LRODBU 0x03028 +#define SXE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) + +#define SXE_DRXCFG (0x03C20) + + +#define SXE_RXDCTL_CNT 128 + + +#define SXE_RXDCTL_DEFAULT 0x40210 + + +#define SXE_SRRCTL_DROP_EN 0x10000000 +#define SXE_SRRCTL_BSIZEPKT_SHIFT (10) +#define SXE_SRRCTL_BSIZEHDRSIZE_SHIFT (2) +#define SXE_SRRCTL_DESCTYPE_DATA_ONEBUF 0x02000000 +#define SXE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define SXE_SRRCTL_BSIZEHDR_MASK 0x00003F00 + + +#define SXE_RXDCTL_ENABLE 0x02000000 +#define SXE_RXDCTL_SWFLSH 0x04000000 +#define SXE_RXDCTL_VME 0x40000000 +#define SXE_RXDCTL_DESC_FIFO_AE_TH_SHIFT 8 +#define SXE_RXDCTL_PREFETCH_NUM_CFG_SHIFT 16 + + +#define SXE_LROCTL_LROEN 0x01 +#define SXE_LROCTL_MAXDESC_1 0x00 +#define SXE_LROCTL_MAXDESC_4 0x04 +#define SXE_LROCTL_MAXDESC_8 0x08 +#define SXE_LROCTL_MAXDESC_16 0x0C + + +#define SXE_RDRXCTL_RDMTS_1_2 0x00000000 +#define SXE_RDRXCTL_RDMTS_EN 0x00200000 +#define SXE_RDRXCTL_CRCSTRIP 0x00000002 +#define SXE_RDRXCTL_PSP 0x00000004 +#define SXE_RDRXCTL_MVMEN 0x00000020 +#define SXE_RDRXCTL_DMAIDONE 0x00000008 +#define SXE_RDRXCTL_AGGDIS 0x00010000 +#define SXE_RDRXCTL_LROFRSTSIZE 0x003E0000 +#define SXE_RDRXCTL_LROLLIDIS 0x00800000 +#define SXE_RDRXCTL_LROACKC 0x02000000 +#define SXE_RDRXCTL_FCOE_WRFIX 0x04000000 +#define SXE_RDRXCTL_MBINTEN 0x10000000 +#define SXE_RDRXCTL_MDP_EN 0x20000000 +#define SXE_RDRXCTL_MPBEN 0x00000010 + +#define SXE_RDRXCTL_MCEN 0x00000040 + + + +#define SXE_RXCTRL_RXEN 0x00000001 + + +#define SXE_LRODBU_LROACKDIS 0x00000080 + + +#define SXE_DRXCFG_GSP_ZERO 0x00000002 +#define SXE_DRXCFG_DBURX_START 0x00000001 + + +#define SXE_DMATXCTL 0x04A80 +#define SXE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) +#define SXE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define SXE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define SXE_TDH(_i) (0x06010 + ((_i) * 0x40)) +#define SXE_TDT(_i) (0x06018 + ((_i) * 0x40)) +#define SXE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define SXE_PVFTDWBAL(p) (0x06038 + (0x40 * (p))) +#define SXE_PVFTDWBAH(p) (0x0603C + (0x40 * (p))) +#define SXE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) +#define SXE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) +#define SXE_MTQC 0x08120 +#define SXE_TXPBFCS 0x0CE00 +#define SXE_DTXCFG 0x0CE08 +#define SXE_DTMPCNT 0x0CE98 + + +#define SXE_DMATXCTL_DEFAULT 0x81000000 + + +#define SXE_DMATXCTL_TE 0x1 +#define SXE_DMATXCTL_GDV 0x8 +#define SXE_DMATXCTL_VT_SHIFT 16 +#define SXE_DMATXCTL_VT_MASK 0xFFFF0000 + + +#define SXE_TXDCTL_HTHRESH_SHIFT 8 +#define SXE_TXDCTL_WTHRESH_SHIFT 16 +#define SXE_TXDCTL_ENABLE 0x02000000 +#define SXE_TXDCTL_SWFLSH 0x04000000 + +#define SXE_PVFTDWBAL_N(ring_per_pool, vf_idx, vf_ring_idx) \ + SXE_PVFTDWBAL((ring_per_pool) * (vf_idx) + vf_ring_idx) +#define SXE_PVFTDWBAH_N(ring_per_pool, vf_idx, vf_ring_idx) \ + SXE_PVFTDWBAH((ring_per_pool) * (vf_idx) + vf_ring_idx) + + +#define SXE_MTQC_RT_ENA 0x1 +#define SXE_MTQC_VT_ENA 0x2 +#define SXE_MTQC_64Q_1PB 0x0 +#define SXE_MTQC_32VF 0x8 +#define SXE_MTQC_64VF 0x4 +#define SXE_MTQC_8TC_8TQ 0xC +#define SXE_MTQC_4TC_4TQ 0x8 + + +#define SXE_TFCS_PB0_MASK 0x1 +#define SXE_TFCS_PB1_MASK 0x2 +#define SXE_TFCS_PB2_MASK 0x4 +#define SXE_TFCS_PB3_MASK 0x8 +#define SXE_TFCS_PB4_MASK 0x10 +#define SXE_TFCS_PB5_MASK 0x20 +#define SXE_TFCS_PB6_MASK 0x40 +#define SXE_TFCS_PB7_MASK 0x80 +#define SXE_TFCS_PB_MASK 0xff + + 
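+/* SXE_DTXCFG (Tx data buffer config) bit values */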
+#define SXE_DTXCFG_DBUTX_START 0x00000001 +#define SXE_DTXCFG_DBUTX_BUF_ALFUL_CFG 0x20 + + +#define SXE_RTRPCS 0x02430 +#define SXE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) +#define SXE_RTRUP2TC 0x03020 +#define SXE_RTTDCS 0x04900 +#define SXE_RTTDQSEL 0x04904 +#define SXE_RTTDT1C 0x04908 +#define SXE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) +#define SXE_RTTBCNRM 0x04980 +#define SXE_RTTBCNRC 0x04984 +#define SXE_RTTUP2TC 0x0C800 +#define SXE_RTTPCS 0x0CD00 +#define SXE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) + + +#define SXE_RTRPCS_RRM 0x00000002 +#define SXE_RTRPCS_RAC 0x00000004 +#define SXE_RTRPCS_ARBDIS 0x00000040 + + +#define SXE_RTRPT4C_MCL_SHIFT 12 +#define SXE_RTRPT4C_BWG_SHIFT 9 +#define SXE_RTRPT4C_GSP 0x40000000 +#define SXE_RTRPT4C_LSP 0x80000000 + + +#define SXE_RTRUP2TC_UP_SHIFT 3 +#define SXE_RTRUP2TC_UP_MASK 7 + + +#define SXE_RTTDCS_ARBDIS 0x00000040 +#define SXE_RTTDCS_TDPAC 0x00000001 + +#define SXE_RTTDCS_VMPAC 0x00000002 + +#define SXE_RTTDCS_TDRM 0x00000010 +#define SXE_RTTDCS_ARBDIS 0x00000040 +#define SXE_RTTDCS_BDPM 0x00400000 +#define SXE_RTTDCS_BPBFSM 0x00800000 + +#define SXE_RTTDCS_SPEED_CHG 0x80000000 + + +#define SXE_RTTDT2C_MCL_SHIFT 12 +#define SXE_RTTDT2C_BWG_SHIFT 9 +#define SXE_RTTDT2C_GSP 0x40000000 +#define SXE_RTTDT2C_LSP 0x80000000 + + +#define SXE_RTTBCNRC_RS_ENA 0x80000000 +#define SXE_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define SXE_RTTBCNRC_RF_INT_SHIFT 14 +#define SXE_RTTBCNRC_RF_INT_MASK \ + (SXE_RTTBCNRC_RF_DEC_MASK << SXE_RTTBCNRC_RF_INT_SHIFT) + + +#define SXE_RTTUP2TC_UP_SHIFT 3 + + +#define SXE_RTTPCS_TPPAC 0x00000020 + +#define SXE_RTTPCS_ARBDIS 0x00000040 +#define SXE_RTTPCS_TPRM 0x00000100 +#define SXE_RTTPCS_ARBD_SHIFT 22 +#define SXE_RTTPCS_ARBD_DCB 0x4 + + +#define SXE_RTTPT2C_MCL_SHIFT 12 +#define SXE_RTTPT2C_BWG_SHIFT 9 +#define SXE_RTTPT2C_GSP 0x40000000 +#define SXE_RTTPT2C_LSP 0x80000000 + + +#define SXE_TPH_CTRL 0x11074 +#define SXE_TPH_TXCTRL(_i) (0x0600C + ((_i) * 0x40)) +#define SXE_TPH_RXCTRL(_i) (((_i) < 64) ? 
(0x0100C + ((_i) * 0x40)) : \ + (0x0D00C + (((_i) - 64) * 0x40))) + + +#define SXE_TPH_CTRL_ENABLE 0x00000000 +#define SXE_TPH_CTRL_DISABLE 0x00000001 +#define SXE_TPH_CTRL_MODE_CB1 0x00 +#define SXE_TPH_CTRL_MODE_CB2 0x02 + + +#define SXE_TPH_RXCTRL_DESC_TPH_EN BIT(5) +#define SXE_TPH_RXCTRL_HEAD_TPH_EN BIT(6) +#define SXE_TPH_RXCTRL_DATA_TPH_EN BIT(7) +#define SXE_TPH_RXCTRL_DESC_RRO_EN BIT(9) +#define SXE_TPH_RXCTRL_DATA_WRO_EN BIT(13) +#define SXE_TPH_RXCTRL_HEAD_WRO_EN BIT(15) +#define SXE_TPH_RXCTRL_CPUID_SHIFT 24 + +#define SXE_TPH_TXCTRL_DESC_TPH_EN BIT(5) +#define SXE_TPH_TXCTRL_DESC_RRO_EN BIT(9) +#define SXE_TPH_TXCTRL_DESC_WRO_EN BIT(11) +#define SXE_TPH_TXCTRL_DATA_RRO_EN BIT(13) +#define SXE_TPH_TXCTRL_CPUID_SHIFT 24 + + +#define SXE_SECTXCTRL 0x08800 +#define SXE_SECTXSTAT 0x08804 +#define SXE_SECTXBUFFAF 0x08808 +#define SXE_SECTXMINIFG 0x08810 +#define SXE_SECRXCTRL 0x08D00 +#define SXE_SECRXSTAT 0x08D04 +#define SXE_LSECTXCTRL 0x08A04 +#define SXE_LSECTXSCL 0x08A08 +#define SXE_LSECTXSCH 0x08A0C +#define SXE_LSECTXSA 0x08A10 +#define SXE_LSECTXPN(_n) (0x08A14 + (4 * (_n))) +#define SXE_LSECTXKEY(_n, _m) (0x08A1C + ((0x10 * (_n)) + (4 * (_m)))) +#define SXE_LSECRXCTRL 0x08B04 +#define SXE_LSECRXSCL 0x08B08 +#define SXE_LSECRXSCH 0x08B0C +#define SXE_LSECRXSA(_i) (0x08B10 + (4 * (_i))) +#define SXE_LSECRXPN(_i) (0x08B18 + (4 * (_i))) +#define SXE_LSECRXKEY(_n, _m) (0x08B20 + ((0x10 * (_n)) + (4 * (_m)))) + + +#define SXE_SECTXCTRL_SECTX_DIS 0x00000001 +#define SXE_SECTXCTRL_TX_DIS 0x00000002 +#define SXE_SECTXCTRL_STORE_FORWARD 0x00000004 + + +#define SXE_SECTXSTAT_SECTX_RDY 0x00000001 +#define SXE_SECTXSTAT_SECTX_OFF_DIS 0x00000002 +#define SXE_SECTXSTAT_ECC_TXERR 0x00000004 + + +#define SXE_SECRXCTRL_SECRX_DIS 0x00000001 +#define SXE_SECRXCTRL_RX_DIS 0x00000002 +#define SXE_SECRXCTRL_RP 0x00000080 + + +#define SXE_SECRXSTAT_SECRX_RDY 0x00000001 +#define SXE_SECRXSTAT_SECRX_OFF_DIS 0x00000002 +#define SXE_SECRXSTAT_ECC_RXERR 0x00000004 + +#define SXE_SECTX_DCB_ENABLE_MASK 0x00001F00 + +#define SXE_LSECTXCTRL_EN_MASK 0x00000003 +#define SXE_LSECTXCTRL_EN_SHIFT 0 +#define SXE_LSECTXCTRL_ES 0x00000010 +#define SXE_LSECTXCTRL_AISCI 0x00000020 +#define SXE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define SXE_LSECTXCTRL_PNTHRSH_SHIFT 8 +#define SXE_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define SXE_LSECRXCTRL_EN_MASK 0x0000000C +#define SXE_LSECRXCTRL_EN_SHIFT 2 +#define SXE_LSECRXCTRL_DROP_EN 0x00000010 +#define SXE_LSECRXCTRL_DROP_EN_SHIFT 4 +#define SXE_LSECRXCTRL_PLSH 0x00000040 +#define SXE_LSECRXCTRL_PLSH_SHIFT 6 +#define SXE_LSECRXCTRL_RP 0x00000080 +#define SXE_LSECRXCTRL_RP_SHIFT 7 +#define SXE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +#define SXE_LSECTXSA_AN0_MASK 0x00000003 +#define SXE_LSECTXSA_AN0_SHIFT 0 +#define SXE_LSECTXSA_AN1_MASK 0x0000000C +#define SXE_LSECTXSA_AN1_SHIFT 2 +#define SXE_LSECTXSA_SELSA 0x00000010 +#define SXE_LSECTXSA_SELSA_SHIFT 4 +#define SXE_LSECTXSA_ACTSA 0x00000020 + +#define SXE_LSECRXSA_AN_MASK 0x00000003 +#define SXE_LSECRXSA_AN_SHIFT 0 +#define SXE_LSECRXSA_SAV 0x00000004 +#define SXE_LSECRXSA_SAV_SHIFT 2 +#define SXE_LSECRXSA_RETIRED 0x00000010 +#define SXE_LSECRXSA_RETIRED_SHIFT 4 + +#define SXE_LSECRXSCH_PI_MASK 0xFFFF0000 +#define SXE_LSECRXSCH_PI_SHIFT 16 + +#define SXE_LSECTXCTRL_DISABLE 0x0 +#define SXE_LSECTXCTRL_AUTH 0x1 +#define SXE_LSECTXCTRL_AUTH_ENCRYPT 0x2 + +#define SXE_LSECRXCTRL_DISABLE 0x0 +#define SXE_LSECRXCTRL_CHECK 0x1 +#define SXE_LSECRXCTRL_STRICT 0x2 +#define SXE_LSECRXCTRL_DROP 0x3 +#define SXE_SECTXCTRL_STORE_FORWARD_ENABLE 
0x4 + + + +#define SXE_IPSTXIDX 0x08900 +#define SXE_IPSTXSALT 0x08904 +#define SXE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) +#define SXE_IPSRXIDX 0x08E00 +#define SXE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) +#define SXE_IPSRXSPI 0x08E14 +#define SXE_IPSRXIPIDX 0x08E18 +#define SXE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) +#define SXE_IPSRXSALT 0x08E2C +#define SXE_IPSRXMOD 0x08E30 + + + +#define SXE_FNAVCTRL 0x0EE00 +#define SXE_FNAVHKEY 0x0EE68 +#define SXE_FNAVSKEY 0x0EE6C +#define SXE_FNAVDIP4M 0x0EE3C +#define SXE_FNAVSIP4M 0x0EE40 +#define SXE_FNAVTCPM 0x0EE44 +#define SXE_FNAVUDPM 0x0EE48 +#define SXE_FNAVIP6M 0x0EE74 +#define SXE_FNAVM 0x0EE70 + +#define SXE_FNAVFREE 0x0EE38 +#define SXE_FNAVLEN 0x0EE4C +#define SXE_FNAVUSTAT 0x0EE50 +#define SXE_FNAVFSTAT 0x0EE54 +#define SXE_FNAVMATCH 0x0EE58 +#define SXE_FNAVMISS 0x0EE5C + +#define SXE_FNAVSIPv6(_i) (0x0EE0C + ((_i) * 4)) +#define SXE_FNAVIPSA 0x0EE18 +#define SXE_FNAVIPDA 0x0EE1C +#define SXE_FNAVPORT 0x0EE20 +#define SXE_FNAVVLAN 0x0EE24 +#define SXE_FNAVHASH 0x0EE28 +#define SXE_FNAVCMD 0x0EE2C + + +#define SXE_FNAVCTRL_FLEX_SHIFT 16 +#define SXE_FNAVCTRL_MAX_LENGTH_SHIFT 24 +#define SXE_FNAVCTRL_FULL_THRESH_SHIFT 28 +#define SXE_FNAVCTRL_DROP_Q_SHIFT 8 +#define SXE_FNAVCTRL_PBALLOC_64K 0x00000001 +#define SXE_FNAVCTRL_PBALLOC_128K 0x00000002 +#define SXE_FNAVCTRL_PBALLOC_256K 0x00000003 +#define SXE_FNAVCTRL_INIT_DONE 0x00000008 +#define SXE_FNAVCTRL_SPECIFIC_MATCH 0x00000010 +#define SXE_FNAVCTRL_REPORT_STATUS 0x00000020 +#define SXE_FNAVCTRL_REPORT_STATUS_ALWAYS 0x00000080 + +#define SXE_FNAVCTRL_FLEX_MASK (0x1F << SXE_FNAVCTRL_FLEX_SHIFT) + +#define SXE_FNAVTCPM_DPORTM_SHIFT 16 + +#define SXE_FNAVM_VLANID 0x00000001 +#define SXE_FNAVM_VLANP 0x00000002 +#define SXE_FNAVM_POOL 0x00000004 +#define SXE_FNAVM_L4P 0x00000008 +#define SXE_FNAVM_FLEX 0x00000010 +#define SXE_FNAVM_DIPv6 0x00000020 + +#define SXE_FNAVPORT_DESTINATION_SHIFT 16 +#define SXE_FNAVVLAN_FLEX_SHIFT 16 +#define SXE_FNAVHASH_SIG_SW_INDEX_SHIFT 16 + +#define SXE_FNAVCMD_CMD_MASK 0x00000003 +#define SXE_FNAVCMD_CMD_ADD_FLOW 0x00000001 +#define SXE_FNAVCMD_CMD_REMOVE_FLOW 0x00000002 +#define SXE_FNAVCMD_CMD_QUERY_REM_FILT 0x00000003 +#define SXE_FNAVCMD_FILTER_VALID 0x00000004 +#define SXE_FNAVCMD_FILTER_UPDATE 0x00000008 +#define SXE_FNAVCMD_IPv6DMATCH 0x00000010 +#define SXE_FNAVCMD_L4TYPE_UDP 0x00000020 +#define SXE_FNAVCMD_L4TYPE_TCP 0x00000040 +#define SXE_FNAVCMD_L4TYPE_SCTP 0x00000060 +#define SXE_FNAVCMD_IPV6 0x00000080 +#define SXE_FNAVCMD_CLEARHT 0x00000100 +#define SXE_FNAVCMD_DROP 0x00000200 +#define SXE_FNAVCMD_INT 0x00000400 +#define SXE_FNAVCMD_LAST 0x00000800 +#define SXE_FNAVCMD_COLLISION 0x00001000 +#define SXE_FNAVCMD_QUEUE_EN 0x00008000 +#define SXE_FNAVCMD_FLOW_TYPE_SHIFT 5 +#define SXE_FNAVCMD_RX_QUEUE_SHIFT 16 +#define SXE_FNAVCMD_RX_TUNNEL_FILTER_SHIFT 23 +#define SXE_FNAVCMD_VT_POOL_SHIFT 24 +#define SXE_FNAVCMD_CMD_POLL 10 +#define SXE_FNAVCMD_TUNNEL_FILTER 0x00800000 + + +#define SXE_LXOFFRXCNT 0x041A8 +#define SXE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) + +#define SXE_EPC_GPRC 0x050E0 +#define SXE_RXDGPC 0x02F50 +#define SXE_RXDGBCL 0x02F54 +#define SXE_RXDGBCH 0x02F58 +#define SXE_RXDDGPC 0x02F5C +#define SXE_RXDDGBCL 0x02F60 +#define SXE_RXDDGBCH 0x02F64 +#define SXE_RXLPBKGPC 0x02F68 +#define SXE_RXLPBKGBCL 0x02F6C +#define SXE_RXLPBKGBCH 0x02F70 +#define SXE_RXDLPBKGPC 0x02F74 +#define SXE_RXDLPBKGBCL 0x02F78 +#define SXE_RXDLPBKGBCH 0x02F7C + +#define SXE_RXTPCIN 0x02F88 +#define SXE_RXTPCOUT 0x02F8C +#define SXE_RXPRDDC 0x02F9C + +#define 
SXE_TXDGPC 0x087A0 +#define SXE_TXDGBCL 0x087A4 +#define SXE_TXDGBCH 0x087A8 +#define SXE_TXSWERR 0x087B0 +#define SXE_TXSWITCH 0x087B4 +#define SXE_TXREPEAT 0x087B8 +#define SXE_TXDESCERR 0x087BC +#define SXE_MNGPRC 0x040B4 +#define SXE_MNGPDC 0x040B8 +#define SXE_RQSMR(_i) (0x02300 + ((_i) * 4)) +#define SXE_TQSM(_i) (0x08600 + ((_i) * 4)) +#define SXE_QPRC(_i) (0x01030 + ((_i) * 0x40)) +#define SXE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) +#define SXE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) + + +#define SXE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) +#define SXE_QPTC(_i) (0x08680 + ((_i) * 0x4)) +#define SXE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) +#define SXE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) +#define SXE_SSVPC 0x08780 +#define SXE_MNGPTC 0x0CF90 +#define SXE_MPC(_i) (0x03FA0 + ((_i) * 4)) + +#define SXE_DBUDRTCICNT(_i) (0x03C6C + ((_i) * 4)) +#define SXE_DBUDRTCOCNT(_i) (0x03C8C + ((_i) * 4)) +#define SXE_DBUDRBDPCNT(_i) (0x03D20 + ((_i) * 4)) +#define SXE_DBUDREECNT(_i) (0x03D40 + ((_i) * 4)) +#define SXE_DBUDROFPCNT(_i) (0x03D60 + ((_i) * 4)) +#define SXE_DBUDTTCICNT(_i) (0x0CE54 + ((_i) * 4)) +#define SXE_DBUDTTCOCNT(_i) (0x0CE74 + ((_i) * 4)) + + + +#define SXE_WUC 0x05800 +#define SXE_WUFC 0x05808 +#define SXE_WUS 0x05810 +#define SXE_IP6AT(_i) (0x05880 + ((_i) * 4)) + + +#define SXE_IP6AT_CNT 4 + + +#define SXE_WUC_PME_EN 0x00000002 +#define SXE_WUC_PME_STATUS 0x00000004 +#define SXE_WUC_WKEN 0x00000010 +#define SXE_WUC_APME 0x00000020 + + +#define SXE_WUFC_LNKC 0x00000001 +#define SXE_WUFC_MAG 0x00000002 +#define SXE_WUFC_EX 0x00000004 +#define SXE_WUFC_MC 0x00000008 +#define SXE_WUFC_BC 0x00000010 +#define SXE_WUFC_ARP 0x00000020 +#define SXE_WUFC_IPV4 0x00000040 +#define SXE_WUFC_IPV6 0x00000080 +#define SXE_WUFC_MNG 0x00000100 + + + + +#define SXE_TSCTRL 0x14800 +#define SXE_TSES 0x14804 +#define SXE_TSYNCTXCTL 0x14810 +#define SXE_TSYNCRXCTL 0x14820 +#define SXE_RXSTMPL 0x14824 +#define SXE_RXSTMPH 0x14828 +#define SXE_SYSTIML 0x14840 +#define SXE_SYSTIMM 0x14844 +#define SXE_SYSTIMH 0x14848 +#define SXE_TIMADJL 0x14850 +#define SXE_TIMADJH 0x14854 +#define SXE_TIMINC 0x14860 + + +#define SXE_TSYNCTXCTL_TXTT 0x0001 +#define SXE_TSYNCTXCTL_TEN 0x0010 + + +#define SXE_TSYNCRXCTL_RXTT 0x0001 +#define SXE_TSYNCRXCTL_REN 0x0010 + + +#define SXE_TSCTRL_TSSEL 0x00001 +#define SXE_TSCTRL_TSEN 0x00002 +#define SXE_TSCTRL_VER_2 0x00010 +#define SXE_TSCTRL_ONESTEP 0x00100 +#define SXE_TSCTRL_CSEN 0x01000 +#define SXE_TSCTRL_PTYP_ALL 0x00C00 +#define SXE_TSCTRL_L4_UNICAST 0x08000 + + +#define SXE_TSES_TXES 0x00200 +#define SXE_TSES_RXES 0x00800 +#define SXE_TSES_TXES_V1_SYNC 0x00000 +#define SXE_TSES_TXES_V1_DELAY_REQ 0x00100 +#define SXE_TSES_TXES_V1_ALL 0x00200 +#define SXE_TSES_RXES_V1_SYNC 0x00000 +#define SXE_TSES_RXES_V1_DELAY_REQ 0x00400 +#define SXE_TSES_RXES_V1_ALL 0x00800 +#define SXE_TSES_TXES_V2_ALL 0x00200 +#define SXE_TSES_RXES_V2_ALL 0x00800 + +#define SXE_IV_SNS 0 +#define SXE_IV_NS 8 +#define SXE_INCPD 0 +#define SXE_BASE_INCVAL 8 + + +#define SXE_VT_CTL 0x051B0 +#define SXE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) + +#define SXE_PFMBICR(_i) (0x00710 + (4 * (_i))) +#define SXE_VFLRE(i) ((i & 1)? 
0x001C0 : 0x00600) +#define SXE_VFLREC(i) (0x00700 + (i * 4)) +#define SXE_VFRE(_i) (0x051E0 + ((_i) * 4)) +#define SXE_VFTE(_i) (0x08110 + ((_i) * 4)) +#define SXE_QDE (0x02F04) +#define SXE_SPOOF(_i) (0x08200 + (_i) * 4) +#define SXE_PFDTXGSWC 0x08220 +#define SXE_VMVIR(_i) (0x08000 + ((_i) * 4)) +#define SXE_VMOLR(_i) (0x0F000 + ((_i) * 4)) +#define SXE_VLVF(_i) (0x0F100 + ((_i) * 4)) +#define SXE_VLVFB(_i) (0x0F200 + ((_i) * 4)) +#define SXE_MRCTL(_i) (0x0F600 + ((_i) * 4)) +#define SXE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) +#define SXE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define SXE_VMECM(_i) (0x08790 + ((_i) * 4)) +#define SXE_PFMBMEM(_i) (0x13000 + (64 * (_i))) + + +#define SXE_VMOLR_CNT 64 +#define SXE_VLVF_CNT 64 +#define SXE_VLVFB_CNT 128 +#define SXE_MRCTL_CNT 4 +#define SXE_VMRVLAN_CNT 8 +#define SXE_VMRVM_CNT 8 +#define SXE_SPOOF_CNT 8 +#define SXE_VMVIR_CNT 64 +#define SXE_VFRE_CNT 2 + + +#define SXE_VMVIR_VLANA_MASK 0xC0000000 +#define SXE_VMVIR_VLAN_VID_MASK 0x00000FFF +#define SXE_VMVIR_VLAN_UP_MASK 0x0000E000 + + +#define SXE_MRCTL_VPME 0x01 + +#define SXE_MRCTL_UPME 0x02 + +#define SXE_MRCTL_DPME 0x04 + +#define SXE_MRCTL_VLME 0x08 + + +#define SXE_VT_CTL_DIS_DEFPL 0x20000000 +#define SXE_VT_CTL_REPLEN 0x40000000 +#define SXE_VT_CTL_VT_ENABLE 0x00000001 +#define SXE_VT_CTL_POOL_SHIFT 7 +#define SXE_VT_CTL_POOL_MASK (0x3F << SXE_VT_CTL_POOL_SHIFT) + + +#define SXE_PFMAILBOX_STS 0x00000001 +#define SXE_PFMAILBOX_ACK 0x00000002 +#define SXE_PFMAILBOX_VFU 0x00000004 +#define SXE_PFMAILBOX_PFU 0x00000008 +#define SXE_PFMAILBOX_RVFU 0x00000010 + + +#define SXE_PFMBICR_VFREQ 0x00000001 +#define SXE_PFMBICR_VFACK 0x00010000 +#define SXE_PFMBICR_VFREQ_MASK 0x0000FFFF +#define SXE_PFMBICR_VFACK_MASK 0xFFFF0000 + + +#define SXE_QDE_ENABLE (0x00000001) +#define SXE_QDE_HIDE_VLAN (0x00000002) +#define SXE_QDE_IDX_MASK (0x00007F00) +#define SXE_QDE_IDX_SHIFT (8) +#define SXE_QDE_WRITE (0x00010000) + + + +#define SXE_SPOOF_VLAN_SHIFT (8) + + +#define SXE_PFDTXGSWC_VT_LBEN 0x1 + + +#define SXE_VMVIR_VLANA_DEFAULT 0x40000000 +#define SXE_VMVIR_VLANA_NEVER 0x80000000 + + +#define SXE_VMOLR_UPE 0x00400000 +#define SXE_VMOLR_VPE 0x00800000 +#define SXE_VMOLR_AUPE 0x01000000 +#define SXE_VMOLR_ROMPE 0x02000000 +#define SXE_VMOLR_ROPE 0x04000000 +#define SXE_VMOLR_BAM 0x08000000 +#define SXE_VMOLR_MPE 0x10000000 + + +#define SXE_VLVF_VIEN 0x80000000 +#define SXE_VLVF_ENTRIES 64 +#define SXE_VLVF_VLANID_MASK 0x00000FFF + + +#define SXE_HDC_HOST_BASE 0x16000 +#define SXE_HDC_SW_LK (SXE_HDC_HOST_BASE + 0x00) +#define SXE_HDC_PF_LK (SXE_HDC_HOST_BASE + 0x04) +#define SXE_HDC_SW_OV (SXE_HDC_HOST_BASE + 0x08) +#define SXE_HDC_FW_OV (SXE_HDC_HOST_BASE + 0x0C) +#define SXE_HDC_PACKET_HEAD0 (SXE_HDC_HOST_BASE + 0x10) + +#define SXE_HDC_PACKET_DATA0 (SXE_HDC_HOST_BASE + 0x20) + + +#define SXE_HDC_MSI_STATUS_REG 0x17000 +#define SXE_FW_STATUS_REG 0x17004 +#define SXE_DRV_STATUS_REG 0x17008 +#define SXE_FW_HDC_STATE_REG 0x1700C +#define SXE_R0_MAC_ADDR_RAL 0x17010 +#define SXE_R0_MAC_ADDR_RAH 0x17014 +#define SXE_CRC_STRIP_REG 0x17018 + + +#define SXE_HDC_SW_LK_BIT 0x0001 +#define SXE_HDC_PF_LK_BIT 0x0003 +#define SXE_HDC_SW_OV_BIT 0x0001 +#define SXE_HDC_FW_OV_BIT 0x0001 +#define SXE_HDC_RELEASE_SW_LK 0x0000 + +#define SXE_HDC_LEN_TO_REG(n) (n - 1) +#define SXE_HDC_LEN_FROM_REG(n) (n + 1) + + +#define SXE_RX_PKT_BUF_SIZE_SHIFT 10 +#define SXE_TX_PKT_BUF_SIZE_SHIFT 10 + +#define SXE_RXIDX_TBL_SHIFT 1 +#define SXE_RXTXIDX_IPS_EN 0x00000001 +#define SXE_RXTXIDX_IDX_SHIFT 3 +#define SXE_RXTXIDX_READ 0x40000000 
+#define SXE_RXTXIDX_WRITE 0x80000000 + + +#define SXE_KEEP_CRC_EN 0x00000001 + + +#define SXE_VMD_CTL 0x0581C + + +#define SXE_VMD_CTL_POOL_EN 0x00000001 +#define SXE_VMD_CTL_POOL_FILTER 0x00000002 + + +#define SXE_FLCTRL 0x14300 +#define SXE_PFCTOP 0x14304 +#define SXE_FCTTV0 0x14310 +#define SXE_FCTTV(_i) (SXE_FCTTV0 + ((_i) * 4)) +#define SXE_FCRTV 0x14320 +#define SXE_TFCS 0x14324 + + +#define SXE_FCTRL_TFCE_MASK 0x0018 +#define SXE_FCTRL_TFCE_LFC_EN 0x0008 +#define SXE_FCTRL_TFCE_PFC_EN 0x0010 +#define SXE_FCTRL_TFCE_DPF_EN 0x0020 +#define SXE_FCTRL_RFCE_MASK 0x0300 +#define SXE_FCTRL_RFCE_LFC_EN 0x0100 +#define SXE_FCTRL_RFCE_PFC_EN 0x0200 + +#define SXE_FCTRL_TFCE_FCEN_MASK 0x00FF0000 +#define SXE_FCTRL_TFCE_XONE_MASK 0xFF000000 + + +#define SXE_PFCTOP_FCT 0x8808 +#define SXE_PFCTOP_FCOP_MASK 0xFFFF0000 +#define SXE_PFCTOP_FCOP_PFC 0x01010000 +#define SXE_PFCTOP_FCOP_LFC 0x00010000 + + +#define SXE_COMCTRL 0x14400 +#define SXE_PCCTRL 0x14404 +#define SXE_LPBKCTRL 0x1440C +#define SXE_MAXFS 0x14410 +#define SXE_SACONH 0x14420 +#define SXE_SACONL 0x14424 +#define SXE_VLANCTRL 0x14430 +#define SXE_VLANID 0x14434 +#define SXE_LINKS 0x14454 +#define SXE_FPGA_SDS_STS 0x14704 +#define SXE_MSCA 0x14500 +#define SXE_MSCD 0x14504 + +#define SXE_HLREG0 0x04240 +#define SXE_MFLCN 0x04294 +#define SXE_MACC 0x04330 + +#define SXE_PCS1GLSTA 0x0420C +#define SXE_MFLCN 0x04294 +#define SXE_PCS1GANA 0x04850 +#define SXE_PCS1GANLP 0x04854 + + +#define SXE_LPBKCTRL_EN 0x00000001 + + +#define SXE_MAC_ADDR_SACONH_SHIFT 32 +#define SXE_MAC_ADDR_SACONL_MASK 0xFFFFFFFF + + +#define SXE_PCS1GLSTA_AN_COMPLETE 0x10000 +#define SXE_PCS1GLSTA_AN_PAGE_RX 0x20000 +#define SXE_PCS1GLSTA_AN_TIMED_OUT 0x40000 +#define SXE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +#define SXE_PCS1GLSTA_AN_ERROR_RWS 0x100000 + +#define SXE_PCS1GANA_SYM_PAUSE 0x100 +#define SXE_PCS1GANA_ASM_PAUSE 0x80 + + +#define SXE_LKSTS_PCS_LKSTS_UP 0x00000001 +#define SXE_LINK_UP_TIME 90 +#define SXE_AUTO_NEG_TIME 45 + + +#define SXE_MSCA_NP_ADDR_MASK 0x0000FFFF +#define SXE_MSCA_NP_ADDR_SHIFT 0 +#define SXE_MSCA_DEV_TYPE_MASK 0x001F0000 +#define SXE_MSCA_DEV_TYPE_SHIFT 16 +#define SXE_MSCA_PHY_ADDR_MASK 0x03E00000 +#define SXE_MSCA_PHY_ADDR_SHIFT 21 +#define SXE_MSCA_OP_CODE_MASK 0x0C000000 +#define SXE_MSCA_OP_CODE_SHIFT 26 +#define SXE_MSCA_ADDR_CYCLE 0x00000000 +#define SXE_MSCA_WRITE 0x04000000 +#define SXE_MSCA_READ 0x0C000000 +#define SXE_MSCA_READ_AUTOINC 0x08000000 +#define SXE_MSCA_ST_CODE_MASK 0x30000000 +#define SXE_MSCA_ST_CODE_SHIFT 28 +#define SXE_MSCA_NEW_PROTOCOL 0x00000000 +#define SXE_MSCA_OLD_PROTOCOL 0x10000000 +#define SXE_MSCA_BYPASSRA_C45 0x40000000 +#define SXE_MSCA_MDI_CMD_ON_PROG 0x80000000 + + +#define MDIO_MSCD_RDATA_LEN 16 +#define MDIO_MSCD_RDATA_SHIFT 16 + + +#define SXE_CRCERRS 0x14A04 +#define SXE_ERRBC 0x14A10 +#define SXE_RLEC 0x14A14 +#define SXE_PRC64 0x14A18 +#define SXE_PRC127 0x14A1C +#define SXE_PRC255 0x14A20 +#define SXE_PRC511 0x14A24 +#define SXE_PRC1023 0x14A28 +#define SXE_PRC1522 0x14A2C +#define SXE_BPRC 0x14A30 +#define SXE_MPRC 0x14A34 +#define SXE_GPRC 0x14A38 +#define SXE_GORCL 0x14A3C +#define SXE_GORCH 0x14A40 +#define SXE_RUC 0x14A44 +#define SXE_RFC 0x14A48 +#define SXE_ROC 0x14A4C +#define SXE_RJC 0x14A50 +#define SXE_TORL 0x14A54 +#define SXE_TORH 0x14A58 +#define SXE_TPR 0x14A5C +#define SXE_PRCPF(_i) (0x14A60 + ((_i) * 4)) +#define SXE_GPTC 0x14B00 +#define SXE_GOTCL 0x14B04 +#define SXE_GOTCH 0x14B08 +#define SXE_TPT 0x14B0C +#define SXE_PTC64 0x14B10 +#define SXE_PTC127 0x14B14 +#define 
SXE_PTC255 0x14B18 +#define SXE_PTC511 0x14B1C +#define SXE_PTC1023 0x14B20 +#define SXE_PTC1522 0x14B24 +#define SXE_MPTC 0x14B28 +#define SXE_BPTC 0x14B2C +#define SXE_PFCT(_i) (0x14B30 + ((_i) * 4)) + +#define SXE_MACCFG 0x0CE04 +#define SXE_MACCFG_PAD_EN 0x00000001 + + +#define SXE_COMCTRL_TXEN 0x0001 +#define SXE_COMCTRL_RXEN 0x0002 +#define SXE_COMCTRL_EDSEL 0x0004 +#define SXE_COMCTRL_SPEED_1G 0x0200 +#define SXE_COMCTRL_SPEED_10G 0x0300 + + +#define SXE_PCCTRL_TXCE 0x0001 +#define SXE_PCCTRL_RXCE 0x0002 +#define SXE_PCCTRL_PEN 0x0100 +#define SXE_PCCTRL_PCSC_ALL 0x30000 + + +#define SXE_MAXFS_TFSEL 0x0001 +#define SXE_MAXFS_RFSEL 0x0002 +#define SXE_MAXFS_MFS_MASK 0xFFFF0000 +#define SXE_MAXFS_MFS 0x40000000 +#define SXE_MAXFS_MFS_SHIFT 16 + + +#define SXE_LINKS_UP 0x00000001 + +#define SXE_10G_LINKS_DOWN 0x00000006 + + +#define SXE_LINK_SPEED_UNKNOWN 0 +#define SXE_LINK_SPEED_10_FULL 0x0002 +#define SXE_LINK_SPEED_100_FULL 0x0008 +#define SXE_LINK_SPEED_1GB_FULL 0x0020 +#define SXE_LINK_SPEED_10GB_FULL 0x0080 + + +#define SXE_HLREG0_TXCRCEN 0x00000001 +#define SXE_HLREG0_RXCRCSTRP 0x00000002 +#define SXE_HLREG0_JUMBOEN 0x00000004 +#define SXE_HLREG0_TXPADEN 0x00000400 +#define SXE_HLREG0_TXPAUSEEN 0x00001000 +#define SXE_HLREG0_RXPAUSEEN 0x00004000 +#define SXE_HLREG0_LPBK 0x00008000 +#define SXE_HLREG0_MDCSPD 0x00010000 +#define SXE_HLREG0_CONTMDC 0x00020000 +#define SXE_HLREG0_CTRLFLTR 0x00040000 +#define SXE_HLREG0_PREPEND 0x00F00000 +#define SXE_HLREG0_PRIPAUSEEN 0x01000000 +#define SXE_HLREG0_RXPAUSERECDA 0x06000000 +#define SXE_HLREG0_RXLNGTHERREN 0x08000000 +#define SXE_HLREG0_RXPADSTRIPEN 0x10000000 + +#define SXE_MFLCN_PMCF 0x00000001 +#define SXE_MFLCN_DPF 0x00000002 +#define SXE_MFLCN_RPFCE 0x00000004 +#define SXE_MFLCN_RFCE 0x00000008 +#define SXE_MFLCN_RPFCE_MASK 0x00000FF4 +#define SXE_MFLCN_RPFCE_SHIFT 4 + +#define SXE_MACC_FLU 0x00000001 +#define SXE_MACC_FSV_10G 0x00030000 +#define SXE_MACC_FS 0x00040000 + +#define SXE_DEFAULT_FCPAUSE 0xFFFF + + +#define SXE_SAQF(_i) (0x0E000 + ((_i) * 4)) +#define SXE_DAQF(_i) (0x0E200 + ((_i) * 4)) +#define SXE_SDPQF(_i) (0x0E400 + ((_i) * 4)) +#define SXE_FTQF(_i) (0x0E600 + ((_i) * 4)) +#define SXE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) + +#define SXE_MAX_FTQF_FILTERS 128 +#define SXE_FTQF_PROTOCOL_MASK 0x00000003 +#define SXE_FTQF_PROTOCOL_TCP 0x00000000 +#define SXE_FTQF_PROTOCOL_UDP 0x00000001 +#define SXE_FTQF_PROTOCOL_SCTP 2 +#define SXE_FTQF_PRIORITY_MASK 0x00000007 +#define SXE_FTQF_PRIORITY_SHIFT 2 +#define SXE_FTQF_POOL_MASK 0x0000003F +#define SXE_FTQF_POOL_SHIFT 8 +#define SXE_FTQF_5TUPLE_MASK_MASK 0x0000001F +#define SXE_FTQF_5TUPLE_MASK_SHIFT 25 +#define SXE_FTQF_SOURCE_ADDR_MASK 0x1E +#define SXE_FTQF_DEST_ADDR_MASK 0x1D +#define SXE_FTQF_SOURCE_PORT_MASK 0x1B +#define SXE_FTQF_DEST_PORT_MASK 0x17 +#define SXE_FTQF_PROTOCOL_COMP_MASK 0x0F +#define SXE_FTQF_POOL_MASK_EN 0x40000000 +#define SXE_FTQF_QUEUE_ENABLE 0x80000000 + +#define SXE_SDPQF_DSTPORT 0xFFFF0000 +#define SXE_SDPQF_DSTPORT_SHIFT 16 +#define SXE_SDPQF_SRCPORT 0x0000FFFF + +#define SXE_L34T_IMIR_SIZE_BP 0x00001000 +#define SXE_L34T_IMIR_RESERVE 0x00080000 +#define SXE_L34T_IMIR_LLI 0x00100000 +#define SXE_L34T_IMIR_QUEUE 0x0FE00000 +#define SXE_L34T_IMIR_QUEUE_SHIFT 21 + +#define SXE_VMTXSW(_i) (0x05180 + ((_i) * 4)) +#define SXE_VMTXSW_REGISTER_COUNT 2 + +#define SXE_TXSTMP_SEL 0x14510 +#define SXE_TXSTMP_VAL 0x1451c + +#define SXE_TXTS_MAGIC0 0x005a005900580057 +#define SXE_TXTS_MAGIC1 0x005e005d005c005b + +#endif diff --git 
a/drivers/net/ethernet/linkdata/sxevf/include/sxe_drv_type.h b/drivers/net/ethernet/linkdata/sxevf/include/sxe_drv_type.h new file mode 100644 index 000000000000..69505651377d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/include/sxe_drv_type.h @@ -0,0 +1,20 @@ + +#ifndef __SXE_DRV_TYPEDEF_H__ +#define __SXE_DRV_TYPEDEF_H__ + +#ifdef SXE_DPDK +#include "sxe_types.h" +#ifndef bool +typedef _Bool bool; +#endif +#else +#include +#endif + +typedef u8 U8; +typedef u16 U16; +typedef u32 U32; +typedef u64 U64; +typedef bool BOOL; + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/include/sxe_version.h b/drivers/net/ethernet/linkdata/sxevf/include/sxe_version.h new file mode 100644 index 000000000000..47c08a1518e1 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/include/sxe_version.h @@ -0,0 +1,28 @@ +#ifndef __SXE_VER_H__ +#define __SXE_VER_H__ + +#define SXE_VERSION "1.3.0.12" +#define SXE_COMMIT_ID "f0e5e96" +#define SXE_BRANCH "develop/rc/sagitta-1.3.0_B012" +#define SXE_BUILD_TIME "2024-08-27 15:56:18" + + +#define SXE_DRV_NAME "sxe" +#define SXEVF_DRV_NAME "sxevf" +#define SXE_DRV_LICENSE "GPL v2" +#define SXE_DRV_AUTHOR "sxe" +#define SXEVF_DRV_AUTHOR "sxevf" +#define SXE_DRV_DESCRIPTION "sxe driver" +#define SXEVF_DRV_DESCRIPTION "sxevf driver" + + +#define SXE_FW_NAME "soc" +#define SXE_FW_ARCH "arm32" + +#ifndef PS3_CFG_RELEASE +#define PS3_SXE_FW_BUILD_MODE "debug" +#else +#define PS3_SXE_FW_BUILD_MODE "release" +#endif + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxe_errno.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxe_errno.h new file mode 100644 index 000000000000..70724225cc17 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxe_errno.h @@ -0,0 +1,58 @@ + +#ifndef __SXE_ERRNO_H__ +#define __SXE_ERRNO_H__ + +#define SXE_ERR_MODULE_STANDARD 0 +#define SXE_ERR_MODULE_PF 1 +#define SXE_ERR_MODULE_VF 2 +#define SXE_ERR_MODULE_HDC 3 + +#define SXE_ERR_MODULE_OFFSET 16 +#define SXE_ERR_MODULE(module, errcode) \ + ((module << SXE_ERR_MODULE_OFFSET) | errcode) +#define SXE_ERR_PF(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_PF, errcode) +#define SXE_ERR_VF(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_VF, errcode) +#define SXE_ERR_HDC(errcode) SXE_ERR_MODULE(SXE_ERR_MODULE_HDC, errcode) + +#define SXE_ERR_CONFIG EINVAL +#define SXE_ERR_PARAM EINVAL +#define SXE_ERR_RESET_FAILED EPERM +#define SXE_ERR_NO_SPACE ENOSPC +#define SXE_ERR_FNAV_CMD_INCOMPLETE EBUSY +#define SXE_ERR_MBX_LOCK_FAIL EBUSY +#define SXE_ERR_OPRATION_NOT_PERM EPERM +#define SXE_ERR_LINK_STATUS_INVALID EINVAL +#define SXE_ERR_LINK_SPEED_INVALID EINVAL +#define SXE_ERR_DEVICE_NOT_SUPPORTED EOPNOTSUPP +#define SXE_ERR_HDC_LOCK_BUSY EBUSY +#define SXE_ERR_HDC_FW_OV_TIMEOUT ETIMEDOUT +#define SXE_ERR_MDIO_CMD_TIMEOUT ETIMEDOUT +#define SXE_ERR_INVALID_LINK_SETTINGS EINVAL +#define SXE_ERR_FNAV_REINIT_FAILED EIO +#define SXE_ERR_CLI_FAILED EIO +#define SXE_ERR_MASTER_REQUESTS_PENDING SXE_ERR_PF(1) +#define SXE_ERR_SFP_NO_INIT_SEQ_PRESENT SXE_ERR_PF(2) +#define SXE_ERR_ENABLE_SRIOV_FAIL SXE_ERR_PF(3) +#define SXE_ERR_IPSEC_SA_STATE_NOT_EXSIT SXE_ERR_PF(4) +#define SXE_ERR_SFP_NOT_PERSENT SXE_ERR_PF(5) +#define SXE_ERR_PHY_NOT_PERSENT SXE_ERR_PF(6) +#define SXE_ERR_PHY_RESET_FAIL SXE_ERR_PF(7) +#define SXE_ERR_FC_NOT_NEGOTIATED SXE_ERR_PF(8) +#define SXE_ERR_SFF_NOT_SUPPORTED SXE_ERR_PF(9) + +#define SXEVF_ERR_MAC_ADDR_INVALID EINVAL +#define SXEVF_ERR_RESET_FAILED EIO +#define SXEVF_ERR_ARGUMENT_INVALID EINVAL +#define SXEVF_ERR_NOT_READY EBUSY +#define SXEVF_ERR_POLL_ACK_FAIL EIO 
+#define SXEVF_ERR_POLL_MSG_FAIL EIO +#define SXEVF_ERR_MBX_LOCK_FAIL EBUSY +#define SXEVF_ERR_REPLY_INVALID EINVAL +#define SXEVF_ERR_IRQ_NUM_INVALID EINVAL +#define SXEVF_ERR_PARAM EINVAL +#define SXEVF_ERR_MAILBOX_FAIL SXE_ERR_VF(1) +#define SXEVF_ERR_MSG_HANDLE_ERR SXE_ERR_VF(2) +#define SXEVF_ERR_DEVICE_NOT_SUPPORTED SXE_ERR_VF(3) +#define SXEVF_ERR_IPSEC_SA_STATE_NOT_EXSIT SXE_ERR_VF(4) + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf.h new file mode 100644 index 000000000000..07dd2189f507 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf.h @@ -0,0 +1,129 @@ +#ifndef __SXEVF_H__ +#define __SXEVF_H__ + +#include +#include +#include +#include + +#include "sxe_log.h" +#include "sxevf_hw.h" +#include "sxevf_ring.h" +#include "sxevf_irq.h" +#include "sxevf_monitor.h" +#include "sxevf_ipsec.h" +#include "sxe_errno.h" +#include "sxe_compat.h" +#include "sxe_errno.h" + +#define SXEVF_JUMBO_FRAME_SIZE_MAX 9728 +#define SXEVF_ETH_DEAD_LOAD (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN) +#define DEV_NAME_LEN 16 + +#define CHAR_BITS 8 + +#define SXEVF_HZ_TRANSTO_MS 1000 + +#define SXEVF_KFREE(addr) \ + do { \ + if (addr != NULL) { \ + kfree(addr); \ + addr = NULL; \ + } \ + } while(0) + +enum { + SXEVF_DCB_ENABLE = BIT(0), + SXEVF_RX_LEGACY_ENABLE = BIT(1), +}; + +enum sxevf_boards { + SXE_BOARD_VF, + SXE_BOARD_VF_HV, +}; + +struct sxevf_sw_stats { + u64 tx_busy; + u64 restart_queue; + u64 tx_timeout_count; + u64 hw_csum_rx_error; + u64 alloc_rx_page; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; +}; + +struct sxevf_stats { + struct sxevf_sw_stats sw; + struct sxevf_hw_stats hw; +}; + +enum sxevf_nic_state { + SXEVF_RESETTING, + SXEVF_TESTING, + SXEVF_DOWN, + SXEVF_DISABLED, + SXEVF_REMOVING, +}; + +struct sxevf_mac_filter_context { + u8 cur_uc_addr[ETH_ALEN]; + u8 def_uc_addr[ETH_ALEN]; + u8 mc_filter_type; +}; + +struct sxevf_adapter { + char dev_name[DEV_NAME_LEN]; + + struct net_device *netdev; + struct pci_dev *pdev; + + u32 sw_mtu; + u16 msg_enable; + + struct sxevf_ring_context rx_ring_ctxt; + struct sxevf_ring_context tx_ring_ctxt; + struct sxevf_ring_context xdp_ring_ctxt; + +#ifdef SXE_IPSEC_CONFIGURE + struct sxevf_ipsec_context ipsec_ctxt; +#endif + + struct sxevf_mac_filter_context mac_filter_ctxt; + struct sxevf_irq_context irq_ctxt; + struct sxevf_monitor_context monitor_ctxt; + struct sxevf_ring_feature ring_f; + + u32 cap; + u32 cap2; + u8 tcs; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct sxevf_hw hw; + + struct bpf_prog *xdp_prog; + spinlock_t mbx_lock; + u32 mbx_version; + + unsigned long state; + struct sxevf_stats stats; + struct sxevf_link_info link; +}; + +struct workqueue_struct *sxevf_wq_get(void); + +s32 sxevf_dev_reset(struct sxevf_hw *hw); + +void sxevf_start_adapter(struct sxevf_adapter *adapter); + +void sxevf_mbx_api_version_init(struct sxevf_adapter *adapter); + +s32 sxevf_ring_irq_init(struct sxevf_adapter *adapter); + +void sxevf_ring_irq_exit(struct sxevf_adapter *adapter); + +void sxevf_save_reset_stats(struct sxevf_adapter *adapter); + +void sxevf_last_counter_stats_init(struct sxevf_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_csum.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_csum.c new file mode 100644 index 000000000000..c88c8baf6bfc --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_csum.c @@ -0,0 +1,174 @@ + +#include +#include +#include + +#include "sxevf_csum.h" 
+#include "sxevf_ring.h" +#include "sxevf_tx_proc.h" +#include "sxe_log.h" + +#ifndef HAVE_SKB_CSUM_SCTP_API +static inline bool sxevf_is_sctp_ipv4(__be16 protocol, struct sk_buff *skb) +{ + bool ret = false; + + if ((protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) { + LOG_DEBUG("protocal:0x%d tx packet type is ipv4 sctp.\n", + protocol); + ret = true; + } + + return ret; +} + +static inline bool sxevf_is_sctp_ipv6(__be16 protocol, struct sk_buff *skb) +{ + bool ret = false; + u32 offset = skb_checksum_start_offset(skb); + u32 hdr_offset = 0; + + ipv6_find_hdr(skb, &hdr_offset, IPPROTO_SCTP, NULL, NULL); + + if ((protocol == htons(ETH_P_IPV6)) && + (offset == hdr_offset)) { + LOG_DEBUG("protocal:0x%d offset:%d tx packet type is ipv6 sctp.\n", + protocol, offset); + ret = true; + } + + return ret; +} + +static inline bool sxevf_prot_is_sctp(__be16 protocol, struct sk_buff *skb) +{ + bool ret = false; + + if (sxevf_is_sctp_ipv4(protocol, skb) || + sxevf_is_sctp_ipv6(protocol, skb)) { + ret = true; + } + + return ret; +} +#else +#define sxevf_prot_is_sctp(protocol, skb) skb_csum_is_sctp(skb) +#endif + +void sxevf_tx_csum_offload(struct sxevf_ring *tx_ring, + struct sxevf_tx_buffer *first, + struct sxevf_tx_context_desc *ctxt_desc) +{ + struct sk_buff *skb = first->skb; + u16 tucmd; + u16 ip_len; + u16 mac_len; + struct sxevf_adapter *adapter = netdev_priv(tx_ring->netdev); + + LOG_DEBUG_BDF("tx ring[%d] ip_summed:%d " + "csum_offset:%d csum_start:%d protocol:%d " + "netdev features:0x%llx\n", + tx_ring->idx, skb->ip_summed, + skb->csum_offset, skb->csum_start, + skb->protocol, tx_ring->netdev->features); + + if (skb->ip_summed != CHECKSUM_PARTIAL) { + goto no_checksum; + } + + switch (skb->csum_offset) { + case SXEVF_TCP_CSUM_OFFSET: + tucmd = SXEVF_TX_CTXTD_TUCMD_L4T_TCP; + break; + case SXEVF_UDP_CSUM_OFFSET: + tucmd = SXEVF_TX_CTXTD_TUCMD_L4T_UDP; + break; + case SXEVF_SCTP_CSUM_OFFSET: + if (sxevf_prot_is_sctp(first->protocol, skb)) { + tucmd = SXEVF_TX_CTXTD_TUCMD_L4T_SCTP; + break; + } + fallthrough; + default: + skb_checksum_help(skb); + goto no_checksum; + } + + if (first->protocol == htons(ETH_P_IP)) { + tucmd |= SXEVF_TX_CTXTD_TUCMD_IPV4; + } + + first->tx_features |= SXEVF_TX_FEATURE_CSUM; + ip_len = skb_checksum_start_offset(skb) - skb_network_offset(skb); + + mac_len = skb_network_offset(skb); + + sxevf_ctxt_desc_tucmd_set(ctxt_desc, tucmd); + sxevf_ctxt_desc_iplen_set(ctxt_desc, ip_len); + sxevf_ctxt_desc_maclen_set(ctxt_desc, mac_len); + + LOG_DEBUG_BDF("tx ring[%d] protocol:%d tucmd:0x%x " + "iplen:0x%x mac_len:0x%x, tx_features:0x%x\n", + tx_ring->idx, first->protocol, tucmd, + ip_len, mac_len, first->tx_features); + +no_checksum: + return; +} + +void sxevf_rx_csum_verify(struct sxevf_ring *ring, + union sxevf_rx_data_desc *desc, + struct sk_buff *skb) +{ + LOG_DEBUG("rx ring[%d] csum verify ip_summed:%d " + "csum_offset:%d csum_start:%d pkt_info:0x%x " + "netdev feature:0x%llx\n", + ring->idx, skb->ip_summed, + skb->csum_offset, skb->csum_start, + desc->wb.lower.lo_dword.hs_rss.pkt_info, + ring->netdev->features); + + skb_checksum_none_assert(skb); + + if (!(ring->netdev->features & NETIF_F_RXCSUM)) { + LOG_WARN("rx ring[%d] checksum verify no offload " + "ip_summed:%d csum_offset:%d csum_start:%d protocol:0x%x\n", + ring->idx, skb->ip_summed, + skb->csum_offset, skb->csum_start, + skb->protocol); + goto l_out; + } + + if (sxevf_status_err_check(desc, SXEVF_RXD_STAT_IPCS) && + sxevf_status_err_check(desc, SXEVF_RXDADV_ERR_IPE)) { + 
ring->rx_stats.csum_err++; + LOG_ERROR("rx ring [%d] ip checksum fail.csum_err:%llu\n", + ring->idx, ring->rx_stats.csum_err); + goto l_out; + } + + if (sxevf_status_err_check(desc, SXEVF_RXD_STAT_LB)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + goto l_out; + } + + if (!sxevf_status_err_check(desc, SXEVF_RXD_STAT_L4CS)) { + LOG_DEBUG("rx ring[%d] no need verify L4 checksum\n", + ring->idx); + goto l_out; + } + + if (sxevf_status_err_check(desc, SXEVF_RXDADV_ERR_L4E)) { + + ring->rx_stats.csum_err++; + + LOG_ERROR("rx ring[%d] L4 checksum verify error.\n", ring->idx); + goto l_out; + } + + skb->ip_summed = CHECKSUM_UNNECESSARY; + +l_out: + return; +} diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_csum.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_csum.h new file mode 100644 index 000000000000..3f684cfd9f17 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_csum.h @@ -0,0 +1,36 @@ + +#ifndef __SXEVF_CSUM_H__ +#define __SXEVF_CSUM_H__ + +#include +#include +#include +#include + +#include "sxevf.h" +#include "sxevf_ring.h" + +#ifdef NOT_INCLUDE_SCTP_H +typedef struct sctphdr { + __be16 source; + __be16 dest; + __be32 vtag; + __le32 checksum; +} __packed sctp_sctphdr_t; +#else +#include +#endif + +#define SXEVF_TCP_CSUM_OFFSET (offsetof(struct tcphdr, check)) +#define SXEVF_UDP_CSUM_OFFSET (offsetof(struct udphdr, check)) +#define SXEVF_SCTP_CSUM_OFFSET (offsetof(struct sctphdr, checksum)) + +void sxevf_tx_csum_offload(struct sxevf_ring *tx_ring, + struct sxevf_tx_buffer *first, + struct sxevf_tx_context_desc *ctxt_desc); + +void sxevf_rx_csum_verify(struct sxevf_ring *ring, + union sxevf_rx_data_desc *desc, + struct sk_buff *skb); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_debug.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_debug.c new file mode 100644 index 000000000000..6e77d5cfeedf --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_debug.c @@ -0,0 +1,27 @@ + +#include +#include +#include + +#include "sxevf_debug.h" + +#define SKB_DESCRIPTION_LEN 256 +void sxevf_dump_skb(struct sk_buff *skb) +{ +#ifndef SXE_DRIVER_RELEASE + u32 len = skb->len; + u32 data_len = skb->data_len; +#endif + + s8 desc[SKB_DESCRIPTION_LEN] = {}; + + snprintf(desc, SKB_DESCRIPTION_LEN, + "skb addr:0x%llx %s", (u64)skb, "linear region"); +#ifndef SXE_DRIVER_RELEASE + sxe_log_binary(__FILE__, __FUNCTION__, __LINE__, + (u8 *)skb->data, (u64)skb, min_t(u32, len - data_len, 256), desc); +#endif + + return; +} + diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_debug.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_debug.h new file mode 100644 index 000000000000..434ed11d7651 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_debug.h @@ -0,0 +1,17 @@ + +#ifndef __SXE_DEBUG_H__ +#define __SXE_DEBUG_H__ + +#include +#include "sxe_log.h" + +void sxevf_dump_skb(struct sk_buff *skb); + +#if defined SXE_DRIVER_RELEASE +#define SKB_DUMP(skb) +#else +#define SKB_DUMP(skb) sxevf_dump_skb(skb) +#endif + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ethtool.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ethtool.c new file mode 100644 index 000000000000..86df0f8eb909 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ethtool.c @@ -0,0 +1,704 @@ +#include "sxe_version.h" +#include "sxevf_ethtool.h" +#include "sxevf_hw.h" +#include "sxe_log.h" +#include "sxevf_tx_proc.h" +#include "sxevf_rx_proc.h" +#include "sxevf_netdev.h" +#include "sxevf_msg.h" +#include "sxevf_irq.h" 
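The Tx checksum path above (sxevf_tx_csum_offload) picks the L4 type by comparing skb->csum_offset against SXEVF_TCP_CSUM_OFFSET, SXEVF_UDP_CSUM_OFFSET and SXEVF_SCTP_CSUM_OFFSET, i.e. the offsetof() of the checksum field in each L4 header. The following is a user-space sketch of that dispatch, under the assumption that only the checksum field position matters: the trimmed struct layouts below are stand-ins for the kernel's tcphdr/udphdr/sctphdr and are not part of the driver.

/*
 * Standalone sketch (not part of the patch): csum_offset -> L4 type
 * dispatch, mirroring the switch in sxevf_tx_csum_offload().
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Trimmed header layouts; checksum fields land at the real offsets. */
struct tcp_hdr  { uint16_t source, dest; uint32_t seq, ack_seq;
		  uint16_t flags, window, check, urg_ptr; };
struct udp_hdr  { uint16_t source, dest, len, check; };
struct sctp_hdr { uint16_t source, dest; uint32_t vtag, checksum; };

#define TCP_CSUM_OFFSET  offsetof(struct tcp_hdr, check)
#define UDP_CSUM_OFFSET  offsetof(struct udp_hdr, check)
#define SCTP_CSUM_OFFSET offsetof(struct sctp_hdr, checksum)

/* Map a checksum-start-relative offset to an L4 type name. */
static const char *l4_type_from_csum_offset(size_t csum_offset)
{
	switch (csum_offset) {
	case TCP_CSUM_OFFSET:
		return "TCP";
	case UDP_CSUM_OFFSET:
		return "UDP";
	case SCTP_CSUM_OFFSET:
		return "SCTP";
	default:
		return "unknown (driver falls back to skb_checksum_help)";
	}
}

int main(void)
{
	printf("offset %zu -> %s\n", TCP_CSUM_OFFSET,
	       l4_type_from_csum_offset(TCP_CSUM_OFFSET));
	printf("offset %zu -> %s\n", UDP_CSUM_OFFSET,
	       l4_type_from_csum_offset(UDP_CSUM_OFFSET));
	printf("offset %zu -> %s\n", SCTP_CSUM_OFFSET,
	       l4_type_from_csum_offset(SCTP_CSUM_OFFSET));
	return 0;
}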
+ +#define SXEVF_DIAG_REGS_TEST 0 +#define SXEVF_DIAG_LINK_TEST 1 + +#define SXEVF_TEST_SLEEP_TIME 4 + +#define SXEVF_ETHTOOL_DUMP_REGS_NUM (sxevf_reg_dump_num_get()) +#define SXEVF_ETHTOOL_DUMP_REGS_LEN (SXEVF_ETHTOOL_DUMP_REGS_NUM * sizeof(u32)) + +#define SXEVF_STAT(m) SXEVF_STATS, \ + sizeof(((struct sxevf_adapter *)0)->m), \ + offsetof(struct sxevf_adapter, m) + +#define SXEVF_NETDEV_STAT(m) NETDEV_STATS, \ + sizeof(((struct net_device_stats *)0)->m), \ + offsetof(struct net_device_stats, m) + +static const char sxevf_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "Link test (on/offline)" +}; + +static const struct sxevf_ethtool_stats sxevf_gstrings_stats[] = { + {"rx_packets", SXEVF_NETDEV_STAT(rx_packets)}, + {"tx_packets", SXEVF_NETDEV_STAT(tx_packets)}, + {"rx_bytes", SXEVF_NETDEV_STAT(rx_bytes)}, + {"tx_bytes", SXEVF_NETDEV_STAT(tx_bytes)}, + {"multicast", SXEVF_NETDEV_STAT(multicast)}, + {"tx_busy", SXEVF_STAT(stats.sw.tx_busy)}, + {"tx_restart_queue", SXEVF_STAT(stats.sw.restart_queue)}, + {"tx_timeout_count", SXEVF_STAT(stats.sw.tx_timeout_count)}, + {"rx_csum_offload_errors", SXEVF_STAT(stats.sw.hw_csum_rx_error)}, + {"alloc_rx_page", SXEVF_STAT(stats.sw.alloc_rx_page)}, + {"alloc_rx_page_failed", SXEVF_STAT(stats.sw.alloc_rx_page_failed)}, + {"alloc_rx_buff_failed", SXEVF_STAT(stats.sw.alloc_rx_buff_failed)}, +}; + +static const char sxevf_priv_flags_strings[][ETH_GSTRING_LEN] = { +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC + "legacy-rx", +#endif +}; + +u32 sxevf_self_test_suite_num_get(void) +{ + return sizeof(sxevf_gstrings_test) / ETH_GSTRING_LEN; +} + +u32 sxevf_stats_num_get(void) +{ + return ARRAY_SIZE(sxevf_gstrings_stats); +} + +u32 sxevf_priv_flags_num_get(void) +{ + return ARRAY_SIZE(sxevf_priv_flags_strings); +} + +static void sxevf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, SXEVF_DRV_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, SXE_VERSION, + sizeof(drvinfo->version)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = SXEVF_PRIV_FLAGS_STR_LEN; + + return; +} + +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +static void +sxevf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *kernel_ring, + struct netlink_ext_ack __always_unused *extack) +#else +static void sxevf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = SXEVF_DESC_CNT_MAX; + ring->tx_max_pending = SXEVF_DESC_CNT_MAX; + ring->rx_pending = adapter->rx_ring_ctxt.ring[0]->depth; + ring->tx_pending = adapter->tx_ring_ctxt.ring[0]->depth; + + return; +} + +inline static bool sxevf_ringparam_changed(struct sxevf_adapter *adapter, + struct ethtool_ringparam *ring, + u32 *tx_cnt, u32 *rx_cnt) +{ + bool changed = true; + + *tx_cnt = clamp_t(u32, ring->tx_pending, + SXEVF_DESC_CNT_MIN, SXEVF_DESC_CNT_MAX); + *tx_cnt = ALIGN(*tx_cnt, SXEVF_REQ_DESCRIPTOR_MULTIPLE); + + *rx_cnt = clamp_t(u32, ring->rx_pending, + SXEVF_DESC_CNT_MIN, SXEVF_DESC_CNT_MAX); + *rx_cnt = ALIGN(*rx_cnt, SXEVF_REQ_DESCRIPTOR_MULTIPLE); + + if ((*tx_cnt == adapter->tx_ring_ctxt.depth) && + (*rx_cnt == adapter->rx_ring_ctxt.depth)) { + changed = false; + } + + return changed; +} + +inline static void sxevf_ring_depth_set(struct sxevf_adapter *adapter, 
+ u32 tx_cnt, u32 rx_cnt) +{ + u32 i; + struct sxevf_ring **tx_ring = adapter->tx_ring_ctxt.ring; + struct sxevf_ring **rx_ring = adapter->rx_ring_ctxt.ring; + struct sxevf_ring **xdp_ring = adapter->xdp_ring_ctxt.ring; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + tx_ring[i]->depth = tx_cnt; + } + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + xdp_ring[i]->depth = tx_cnt; + } + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + rx_ring[i]->depth = rx_cnt; + } + adapter->tx_ring_ctxt.depth = tx_cnt; + adapter->xdp_ring_ctxt.depth = tx_cnt; + adapter->rx_ring_ctxt.depth = rx_cnt; + + return; +} + +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +static int +sxevf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *user_param, + struct kernel_ethtool_ringparam __always_unused *kernel_ring, + struct netlink_ext_ack __always_unused *extack) +#else +static int sxevf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *user_param) +#endif +{ + int ret = 0; + u32 new_rx_count, new_tx_count; + struct sxevf_adapter *adapter = netdev_priv(netdev); + + if ((user_param->rx_mini_pending) || (user_param->rx_jumbo_pending)) { + LOG_ERROR_BDF("dont support set rx_mini_pending=%u or rx_jumbo_pending=%u\n", + user_param->rx_mini_pending, user_param->rx_jumbo_pending); + ret = -EINVAL; + goto l_end; + } + + if (!sxevf_ringparam_changed(adapter, user_param, + &new_tx_count, &new_rx_count)) { + LOG_DEBUG_BDF("ring depth dont change, tx_depth=%u, rx_depth=%u\n", + new_tx_count, new_rx_count); + goto l_end; + } + + while (test_and_set_bit(SXEVF_RESETTING, &adapter->state)) { + usleep_range(1000, 2000); + } + + if (!netif_running(adapter->netdev)) { + sxevf_ring_depth_set(adapter, new_tx_count, new_rx_count); + goto l_clear; + } + + sxevf_down(adapter); + + if (new_tx_count != adapter->tx_ring_ctxt.depth) { + ret = sxevf_tx_ring_depth_reset(adapter, new_tx_count); + if (ret < 0) { + goto l_up; + } + } + + if (new_rx_count != adapter->rx_ring_ctxt.depth) { + ret = sxevf_rx_ring_depth_reset(adapter, new_rx_count); + } +l_up: + sxevf_up(adapter); +l_clear: + clear_bit(SXEVF_RESETTING, &adapter->state); +l_end: + return ret; +} + +static void sxevf_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + u32 i; + char *p = (char *)data; + struct sxevf_adapter *adapter = netdev_priv(netdev); + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *sxevf_gstrings_test, + SXEVF_TEST_GSTRING_ARRAY_SIZE * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: + for (i = 0; i < SXEVF_STATS_ARRAY_SIZE; i++) { + memcpy(p, sxevf_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + sprintf(p, "tx_ring_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_ring_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + sprintf(p, "xdp_ring_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "xdp_ring_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + sprintf(p, "rx_ring_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_ring_%u_bytes", i); + p += ETH_GSTRING_LEN; + } + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, sxevf_priv_flags_strings, + SXEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + default: + break; + } + + return ; +} + +static int sxevf_get_sset_count(struct net_device *netdev, int sset) +{ + int ret; + + switch (sset) { + case ETH_SS_TEST: + ret = SXEVF_TEST_GSTRING_ARRAY_SIZE; + 
break; + case ETH_SS_STATS: + ret = SXEVF_STATS_LEN; + break; + case ETH_SS_PRIV_FLAGS: + ret = SXEVF_PRIV_FLAGS_STR_LEN; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +static void sxevf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + s8 *p; + u32 i, j, start; + struct sxevf_ring *ring; + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats; + struct sxevf_adapter *adapter = netdev_priv(netdev); + + sxevf_update_stats(adapter); + net_stats = dev_get_stats(netdev, &temp); + for (i = 0; i < SXEVF_STATS_ARRAY_SIZE; i++) { + switch (sxevf_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *) net_stats + + sxevf_gstrings_stats[i].stat_offset; + break; + case SXEVF_STATS: + p = (char *) adapter + + sxevf_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; + } + + data[i] = (sxevf_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < adapter->tx_ring_ctxt.num; j++) { + ring = adapter->tx_ring_ctxt.ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } + + for (j = 0; j < adapter->xdp_ring_ctxt.num; j++) { + ring = adapter->xdp_ring_ctxt.ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } + + for (j = 0; j < adapter->rx_ring_ctxt.num; j++) { + ring = adapter->rx_ring_ctxt.ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; + } + + return; +} + +static int sxevf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + int ret = -EOPNOTSUPP; + struct sxevf_adapter *adapter = netdev_priv(netdev); + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->rx_ring_ctxt.num; + ret = 0; + break; + default: + LOG_DEBUG_BDF("command parameters not supported\n, cmd=%u", cmd->cmd); + break; + } + + return ret; +} + +static u32 sxevf_get_priv_flags(struct net_device *netdev) +{ + u32 priv_flags = 0; +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC + struct sxevf_adapter *adapter = netdev_priv(netdev); + + if (adapter->cap & SXEVF_RX_LEGACY_ENABLE) + priv_flags |= SXEVF_PRIV_FLAGS_LEGACY_RX; +#endif + + return priv_flags; +} + +static int sxevf_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + u32 cap = adapter->cap; + +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC + cap &= ~SXEVF_RX_LEGACY_ENABLE; + if (priv_flags & SXEVF_PRIV_FLAGS_LEGACY_RX) { + cap |= SXEVF_RX_LEGACY_ENABLE; + } +#endif + + if (cap != adapter->cap) { + adapter->cap = cap; + + if (netif_running(netdev)) { + sxevf_hw_reinit(adapter); + } + } + + LOG_DEBUG_BDF("priv_flags=%u\n", priv_flags); + + return 0; +} + +static int sxevf_nway_reset(struct net_device *netdev) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + sxevf_hw_reinit(adapter); + LOG_DEBUG_BDF("ethtool reset\n"); + } + + return 0; +} + +static 
int sxevf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + cmd->base.autoneg = AUTONEG_DISABLE; + cmd->base.port = -1; + + if (adapter->link.is_up) { + switch (adapter->link.speed) { + case SXEVF_LINK_SPEED_10GB_FULL: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKR_Full); + cmd->base.speed = SPEED_10000; + break; + case SXEVF_LINK_SPEED_1GB_FULL: + cmd->base.speed = SPEED_1000; + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseKX_Full); + break; + default: + cmd->base.speed = SPEED_10000; + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseKR_Full); + break; + } + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static u32 sxevf_get_rss_redir_tbl_size(struct net_device *netdev) +{ + return SXEVF_MAX_RETA_ENTRIES; +} + +static u32 sxevf_get_rss_hash_key_size(struct net_device *netdev) +{ + return SXEVF_RSS_HASH_KEY_SIZE; +} + +STATIC int sxevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + int err = 0; + struct sxevf_adapter *adapter = netdev_priv(netdev); + + if (hfunc) { + *hfunc = ETH_RSS_HASH_TOP; + } + + if (!indir && !key) { + LOG_DEBUG_BDF("param err, indir=%p, key=%p\n", indir, key); + return 0; + } + + spin_lock_bh(&adapter->mbx_lock); + if (indir) { + err = sxevf_redir_tbl_get(&adapter->hw, adapter->rx_ring_ctxt.num, indir); + } + + if (!err && key) { + err = sxevf_rss_hash_key_get(&adapter->hw, key); + } + + spin_unlock_bh(&adapter->mbx_lock); + + return err; +} + +static int sxevf_get_regs_len(struct net_device *netdev) +{ + return SXEVF_ETHTOOL_DUMP_REGS_LEN; +} + +static void sxevf_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *data) +{ + u8 dump_regs_num; + struct sxevf_adapter *adapter = netdev_priv(netdev); + struct sxevf_hw *hw = &adapter->hw; + + memset(data, 0, SXEVF_ETHTOOL_DUMP_REGS_LEN); + + regs->version = 0; + + dump_regs_num = hw->setup.ops->regs_dump(hw, data, SXEVF_ETHTOOL_DUMP_REGS_LEN); + + if (dump_regs_num != SXEVF_ETHTOOL_DUMP_REGS_NUM) { + LOG_WARN_BDF("dump_regs_num=%u, regs_num_max=%u\n", dump_regs_num, SXEVF_ETHTOOL_DUMP_REGS_NUM); + } + + return; +} + +static u32 sxevf_get_msglevel(struct net_device *netdev) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void sxevf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; + + return; +} + +static s32 sxevf_link_test(struct sxevf_adapter *adapter) +{ + s32 ret; + u32 link_up; + struct sxevf_hw *hw = &adapter->hw; + + link_up = hw->setup.ops->link_state_get(hw); + if (!(link_up & SXE_VFLINKS_UP)) { + ret = -SXEVF_DIAG_TEST_BLOCKED; + } else { + ret = SXEVF_DIAG_TEST_PASSED; + } + + return ret; +} + +STATIC int sxevf_reg_test(struct sxevf_adapter *adapter) +{ + s32 ret; + struct sxevf_hw *hw = &adapter->hw; + + if (sxevf_is_hw_fault(hw)) { + LOG_DEV_ERR("nic hw fault - register test blocked\n"); + ret = -SXEVF_DIAG_TEST_BLOCKED; + goto l_end; + } + + ret = hw->setup.ops->regs_test(hw); + if (ret) { + LOG_ERROR_BDF("register test failed\n"); + goto l_end; + } + +l_end: + return ret; +} + +static void sxevf_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *result) +{ + s32 ret; + struct sxevf_adapter *adapter = 
netdev_priv(netdev); + bool if_running = netif_running(netdev); + struct sxevf_hw *hw = &adapter->hw; + + if (sxevf_is_hw_fault(hw)) { + result[SXEVF_DIAG_REGS_TEST] = SXEVF_DIAG_TEST_BLOCKED; + result[SXEVF_DIAG_LINK_TEST] = SXEVF_DIAG_TEST_BLOCKED; + eth_test->flags |= ETH_TEST_FL_FAILED; + LOG_DEV_ERR("nic hw fault - test blocked\n"); + return; + } + + set_bit(SXEVF_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + + LOG_DEV_DEBUG("offline testing starting\n"); + + ret = sxevf_link_test(adapter); + if (ret) { + eth_test->flags |= ETH_TEST_FL_FAILED; + } + result[SXEVF_DIAG_LINK_TEST] = -ret; + + if (if_running) { + sxevf_close(netdev); + } else { + sxevf_reset(adapter); + } + + LOG_DEV_DEBUG("register testing starting\n"); + + ret = sxevf_reg_test(adapter); + if (ret) { + eth_test->flags |= ETH_TEST_FL_FAILED; + } + result[SXEVF_DIAG_REGS_TEST] = -ret; + + sxevf_reset(adapter); + + clear_bit(SXEVF_TESTING, &adapter->state); + if (if_running) { + sxevf_open(netdev); + } + } else { + LOG_DEV_DEBUG("online testing starting\n"); + + ret = sxevf_link_test(adapter); + if (ret) { + eth_test->flags |= ETH_TEST_FL_FAILED; + } + result[SXEVF_DIAG_LINK_TEST] = -ret; + + result[SXEVF_DIAG_REGS_TEST] = SXEVF_DIAG_TEST_PASSED; + + clear_bit(SXEVF_TESTING, &adapter->state); + } + + msleep_interruptible(SXEVF_TEST_SLEEP_TIME * SXEVF_HZ_TRANSTO_MS); + + return; +} + +static int sxevf_get_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + return sxevf_irq_coalesce_get(netdev, ec); +} + +static int sxevf_set_coalesce(struct net_device *netdev, +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else + struct ethtool_coalesce *ec) +#endif +{ + return sxevf_irq_coalesce_set(netdev, ec); +} + +static const struct ethtool_ops sxevf_ethtool_ops = { +#ifdef ETHTOOL_COALESCE_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, +#endif + .get_drvinfo = sxevf_get_drvinfo, + .nway_reset = sxevf_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = sxevf_get_ringparam, + .set_ringparam = sxevf_set_ringparam, + .get_strings = sxevf_get_strings, + .get_sset_count = sxevf_get_sset_count, + .get_ethtool_stats = sxevf_get_ethtool_stats, + .get_rxnfc = sxevf_get_rxnfc, + .get_rxfh_indir_size = sxevf_get_rss_redir_tbl_size, + .get_rxfh_key_size = sxevf_get_rss_hash_key_size, + .get_rxfh = sxevf_get_rxfh, + .get_link_ksettings = sxevf_get_link_ksettings, + .get_priv_flags = sxevf_get_priv_flags, + .set_priv_flags = sxevf_set_priv_flags, + .get_regs_len = sxevf_get_regs_len, + .get_regs = sxevf_get_regs, + .get_msglevel = sxevf_get_msglevel, + .set_msglevel = sxevf_set_msglevel, + .self_test = sxevf_diag_test, + + .set_coalesce = sxevf_set_coalesce, + .get_coalesce = sxevf_get_coalesce, +}; + +void sxevf_ethtool_ops_set(struct net_device *netdev) +{ + netdev->ethtool_ops = &sxevf_ethtool_ops; + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ethtool.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ethtool.h new file mode 100644 index 000000000000..e0183c414a42 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ethtool.h @@ -0,0 +1,42 @@ +#ifndef __SXEVF_ETHTOOL_H__ +#define __SXEVF_ETHTOOL_H__ + +#include +#include "sxevf.h" + +#define 
SXEVF_TEST_GSTRING_ARRAY_SIZE sxevf_self_test_suite_num_get() + +#define SXEVF_RING_STATS_LEN ( \ + (((struct sxevf_adapter *)netdev_priv(netdev))->tx_ring_ctxt.num + \ + ((struct sxevf_adapter *)netdev_priv(netdev))->xdp_ring_ctxt.num + \ + ((struct sxevf_adapter *)netdev_priv(netdev))->rx_ring_ctxt.num) * \ + (sizeof(struct sxevf_ring_stats) / sizeof(u64))) + +#define SXEVF_STATS_ARRAY_SIZE sxevf_stats_num_get() + +#define SXEVF_STATS_LEN (SXEVF_STATS_ARRAY_SIZE + SXEVF_RING_STATS_LEN) + +#define SXEVF_PRIV_FLAGS_LEGACY_RX BIT(0) +#define SXEVF_PRIV_FLAGS_STR_LEN sxevf_priv_flags_num_get() + +enum { + NETDEV_STATS, + SXEVF_STATS +}; + +struct sxevf_ethtool_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +u32 sxevf_self_test_suite_num_get(void); + +u32 sxevf_stats_num_get(void); + +u32 sxevf_priv_flags_num_get(void); + +void sxevf_ethtool_ops_set(struct net_device *netdev); + +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_hw.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_hw.c new file mode 100644 index 000000000000..5a7e718cedc5 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_hw.c @@ -0,0 +1,1054 @@ +#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST) +#include + +#include "sxevf_hw.h" +#include "sxevf_regs.h" +#include "sxe_log.h" +#include "sxevf_irq.h" +#include "sxevf_msg.h" +#include "sxevf_ring.h" +#include "sxevf.h" +#include "sxevf_rx_proc.h" +#else +#include "sxe_errno.h" +#include "sxe_logs.h" +#include "sxe_dpdk_version.h" +#include "sxe_compat_version.h" +#include "sxevf.h" +#include "sxevf_hw.h" +#endif + +#if defined SXE_DPDK_L4_FEATURES && defined SXE_DPDK_SRIOV +struct sxevf_adapter; +#endif + +#define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : ((1ULL<<(n))-1)) +#define DMA_MASK_NONE 0x0ULL + +#define SXEVF_REG_READ_CNT 5 + +#define SXE_REG_READ_FAIL 0xffffffffU + +#define SXEVF_RING_WAIT_LOOP (100) +#define SXEVF_MAX_RX_DESC_POLL (10) + + +#define SXEVF_REG_READ(hw, addr) sxevf_reg_read(hw, addr) +#define SXEVF_REG_WRITE(hw, reg, value) sxevf_reg_write(hw, reg, value) +#define SXEVF_WRITE_FLUSH(a) sxevf_reg_read(a, SXE_VFSTATUS) + +#ifndef SXE_DPDK +void sxevf_hw_fault_handle(struct sxevf_hw *hw) +{ + struct sxevf_adapter *adapter = hw->adapter; + + if (test_bit(SXEVF_HW_FAULT, &hw->state)) { + goto l_ret; + } + + set_bit(SXEVF_HW_FAULT, &hw->state); + + LOG_DEV_ERR("sxe nic hw fault\n"); + + if ((hw->fault_handle != NULL) && (hw->priv != NULL) ) { + hw->fault_handle(hw->priv); + } + +l_ret: + return; +} + +static void sxevf_hw_fault_check(struct sxevf_hw *hw, u32 reg) +{ + u32 value; + u8 __iomem *base_addr = hw->reg_base_addr; + struct sxevf_adapter *adapter = hw->adapter; + u8 i; + + if (reg == SXE_VFSTATUS) { + sxevf_hw_fault_handle(hw); + return; + } + + + for (i = 0; i < SXEVF_REG_READ_CNT; i++) { + value = hw->reg_read(base_addr + SXE_VFSTATUS); + + if (value != SXEVF_REG_READ_FAIL) { + break; + } + + mdelay(20); + } + + LOG_INFO_BDF("retry done i:%d value:0x%x\n", i, value); + + if (value == SXEVF_REG_READ_FAIL) { + sxevf_hw_fault_handle(hw); + } + + return; +} + +STATIC u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg) +{ + u32 value; + u8 __iomem *base_addr = hw->reg_base_addr; + struct sxevf_adapter *adapter = hw->adapter; + + if (sxevf_is_hw_fault(hw)) { + value = SXEVF_REG_READ_FAIL; + goto l_ret; + } + + value = hw->reg_read(base_addr + reg); + if (unlikely(SXEVF_REG_READ_FAIL == value)) { + LOG_ERROR_BDF("reg[0x%x] read failed, value=%#x\n", reg, value); + sxevf_hw_fault_check(hw, reg); + } + +l_ret: + return value; +} + +STATIC void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *base_addr = hw->reg_base_addr; + + if (sxevf_is_hw_fault(hw)) { + goto l_ret; + } + + hw->reg_write(value, base_addr + reg); + +l_ret: + return; +} + +#else + +STATIC u32 sxevf_reg_read(struct sxevf_hw *hw, u32 reg) +{ + u32 i, value; + u8 __iomem *base_addr = hw->reg_base_addr; + + value = rte_le_to_cpu_32(rte_read32(base_addr + reg)); + if (unlikely(SXEVF_REG_READ_FAIL == value)) { + for (i = 0; i < SXEVF_REG_READ_CNT; i++) { + LOG_ERROR("reg[0x%x] read failed, value=%#x\n", + reg, value); + value = rte_le_to_cpu_32(rte_read32(base_addr + reg)); + if (value != SXEVF_REG_READ_FAIL) { + LOG_INFO("reg[0x%x] read ok, value=%#x\n", + reg, value); + break; + } + + mdelay(3); + } + } + + return value; +} + +STATIC void sxevf_reg_write(struct sxevf_hw *hw, u32 reg, u32 value) +{ + u8 __iomem *base_addr = hw->reg_base_addr; + + rte_write32((rte_cpu_to_le_32(value)), (base_addr + reg)); + + return; +} +#endif + +void sxevf_hw_stop(struct sxevf_hw *hw) +{ + u8 i; + u32 value; + + for (i = 0; i < SXEVF_TXRX_RING_NUM_MAX; i++) { + value = SXEVF_REG_READ(hw, SXE_VFRXDCTL(i)); + if (value & SXE_VFRXDCTL_ENABLE) { + value &= ~SXE_VFRXDCTL_ENABLE; + SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(i), value); + } + } + + SXEVF_WRITE_FLUSH(hw); + + SXEVF_REG_WRITE(hw, SXE_VFEIMC, SXEVF_VFEIMC_IRQ_MASK); + SXEVF_REG_READ(hw, SXE_VFEICR); + + for (i = 0; i < SXEVF_TXRX_RING_NUM_MAX; i++) { + value = SXEVF_REG_READ(hw, SXE_VFTXDCTL(i)); + if (value & SXE_VFTXDCTL_ENABLE) { + value &= ~SXE_VFTXDCTL_ENABLE; + SXEVF_REG_WRITE(hw, SXE_VFTXDCTL(i), value); + } + } + + return; +} + +void sxevf_msg_write(struct sxevf_hw *hw, u8 index, u32 msg) +{ 
+ struct sxevf_adapter *adapter = hw->adapter; + + SXEVF_REG_WRITE(hw, SXE_VFMBMEM + (index << 2), msg); + + LOG_DEBUG_BDF("index:%u write mbx mem:0x%x.\n", index, msg); + + return; +} + +u32 sxevf_msg_read(struct sxevf_hw *hw, u8 index) +{ + u32 value = SXEVF_REG_READ(hw, SXE_VFMBMEM + (index << 2)); + struct sxevf_adapter *adapter = hw->adapter; + + LOG_DEBUG_BDF("index:%u read mbx mem:0x%x.\n", index, value); + + return value; +} + +u32 sxevf_mailbox_read(struct sxevf_hw *hw) +{ + return SXEVF_REG_READ(hw, SXE_VFMAILBOX); +} + +void sxevf_mailbox_write(struct sxevf_hw *hw, u32 value) +{ + SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, value); + return; +} + +void sxevf_pf_req_irq_trigger(struct sxevf_hw *hw) +{ + SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, SXE_VFMAILBOX_REQ); + + return; +} + +void sxevf_pf_ack_irq_trigger(struct sxevf_hw *hw) +{ + SXEVF_REG_WRITE(hw, SXE_VFMAILBOX, SXE_VFMAILBOX_ACK); + + return; +} + +void sxevf_event_irq_map(struct sxevf_hw *hw, u16 vector) +{ + u8 allocation; + u32 ivar; + + allocation = vector | SXEVF_IVAR_ALLOC_VALID; + + ivar = SXEVF_REG_READ(hw, SXE_VFIVAR_MISC); + ivar &= ~0xFF; + ivar |= allocation; + + SXEVF_REG_WRITE(hw, SXE_VFIVAR_MISC, ivar); + + return; +} + +void sxevf_specific_irq_enable(struct sxevf_hw *hw, u32 value) +{ + SXEVF_REG_WRITE(hw, SXE_VFEIMS, value); + + return; +} + +void sxevf_irq_enable(struct sxevf_hw *hw, u32 mask) +{ + SXEVF_REG_WRITE(hw, SXE_VFEIAM, mask); + SXEVF_REG_WRITE(hw, SXE_VFEIMS, mask); + + return; +} + +void sxevf_irq_disable(struct sxevf_hw *hw) +{ + SXEVF_REG_WRITE(hw, SXE_VFEIAM, 0); + SXEVF_REG_WRITE(hw, SXE_VFEIMC, ~0); + + SXEVF_WRITE_FLUSH(hw); + + return; +} + +void sxevf_hw_ring_irq_map(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 vector) +{ + u8 allocation; + u32 ivar, position; + + allocation = vector | SXEVF_IVAR_ALLOC_VALID; + + position = ((hw_ring_idx & 1) * 16) + (8 * is_tx); + + ivar = SXEVF_REG_READ(hw, SXE_VFIVAR(hw_ring_idx >> 1)); + ivar &= ~(0xFF << position); + ivar |= (allocation << position); + + SXEVF_REG_WRITE(hw, SXE_VFIVAR(hw_ring_idx >> 1), ivar); + + return; +} + +void sxevf_ring_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 interval) +{ + u32 eitr = interval & SXEVF_EITR_ITR_MASK; + + eitr |= SXEVF_EITR_CNT_WDIS; + + SXEVF_REG_WRITE(hw, SXE_VFEITR(irq_idx), eitr); + + return; +} + +static void sxevf_event_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 value) +{ + SXEVF_REG_WRITE(hw, SXE_VFEITR(irq_idx), value); + + return; +} + +static void sxevf_pending_irq_clear(struct sxevf_hw *hw) +{ + SXEVF_REG_READ(hw, SXE_VFEICR); + + return; +} + +static void sxevf_ring_irq_trigger(struct sxevf_hw *hw, u64 eics) +{ + SXEVF_REG_WRITE(hw, SXE_VFEICS, eics); + + return; +} + +static const struct sxevf_irq_operations sxevf_irq_ops = { + .ring_irq_interval_set = sxevf_ring_irq_interval_set, + .event_irq_interval_set = sxevf_event_irq_interval_set, + .ring_irq_map = sxevf_hw_ring_irq_map, + .event_irq_map = sxevf_event_irq_map, + .pending_irq_clear = sxevf_pending_irq_clear, + .ring_irq_trigger = sxevf_ring_irq_trigger, + .specific_irq_enable = sxevf_specific_irq_enable, + .irq_enable = sxevf_irq_enable, + .irq_disable = sxevf_irq_disable, +}; + +void sxevf_hw_reset(struct sxevf_hw *hw) +{ + SXEVF_REG_WRITE(hw, SXE_VFCTRL, SXE_VFCTRL_RST); + SXEVF_WRITE_FLUSH(hw); + + return; +} + +STATIC bool sxevf_hw_rst_done(struct sxevf_hw *hw) +{ + return !(SXEVF_REG_READ(hw, SXE_VFCTRL) & SXE_VFCTRL_RST); +} + +u32 sxevf_link_state_get(struct sxevf_hw *hw) +{ + return SXEVF_REG_READ(hw, 
SXE_VFLINKS); +} + +u32 dump_regs[] = { + SXE_VFCTRL, +}; + +u16 sxevf_reg_dump_num_get(void) +{ + return ARRAY_SIZE(dump_regs); +} + +static u32 sxevf_reg_dump(struct sxevf_hw *hw, u32 *regs_buff, u32 buf_size) +{ + u32 i; + u32 regs_num = buf_size / sizeof(u32); + + for (i = 0; i < regs_num; i++) { + regs_buff[i] = SXEVF_REG_READ(hw, dump_regs[i]); + } + + return i; +} + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +struct sxevf_self_test_reg { + u32 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +static const struct sxevf_self_test_reg self_test_reg[] = { + { SXE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + { SXE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { SXE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFFFF, 0x000FFFFF }, + { SXE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, SXEVF_RXDCTL_ENABLE }, + { SXE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + { SXE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 }, + { SXE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + { SXE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + { SXE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + { .reg = 0 } +}; + +static s32 sxevf_reg_pattern_test(struct sxevf_hw *hw, u32 reg, + u32 mask, u32 write) +{ + s32 ret = 0; + u32 pat, val, before; + static const u32 test_pattern[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFE}; + struct sxevf_adapter *adapter = hw->adapter; + + if (sxevf_is_hw_fault(hw)) { + LOG_ERROR_BDF("hw fault\n"); + ret = -SXEVF_DIAG_TEST_BLOCKED; + goto l_end; + } + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = SXEVF_REG_READ(hw, reg); + + SXEVF_REG_WRITE(hw, reg, test_pattern[pat] & write); + val = SXEVF_REG_READ(hw, reg); + if (val != (test_pattern[pat] & write & mask)) { + LOG_MSG_ERR(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, val, (test_pattern[pat] & write & mask)); + SXEVF_REG_WRITE(hw, reg, before); + ret = -SXEVF_DIAG_REG_PATTERN_TEST_ERR; + goto l_end; + } + + SXEVF_REG_WRITE(hw, reg, before); + } + +l_end: + return ret; +} + +static s32 sxevf_reg_set_and_check(struct sxevf_hw *hw, int reg, + u32 mask, u32 write) +{ + s32 ret = 0; + u32 val, before; + struct sxevf_adapter *adapter = hw->adapter; + + if (sxevf_is_hw_fault(hw)) { + LOG_ERROR_BDF("hw fault\n"); + ret = -SXEVF_DIAG_TEST_BLOCKED; + goto l_end; + } + + before = SXEVF_REG_READ(hw, reg); + SXEVF_REG_WRITE(hw, reg, write & mask); + val = SXEVF_REG_READ(hw, reg); + if ((write & mask) != (val & mask)) { + LOG_DEV_ERR("set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + SXEVF_REG_WRITE(hw, reg, before); + ret = -SXEVF_DIAG_CHECK_REG_TEST_ERR; + goto l_end; + } + + SXEVF_REG_WRITE(hw, reg, before); + +l_end: + return ret; +} + +STATIC s32 sxevf_regs_test(struct sxevf_hw *hw) +{ + u32 i; + s32 ret = 0; + const struct sxevf_self_test_reg *test = self_test_reg; + struct sxevf_adapter *adapter = hw->adapter; + + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + switch (test->test_type) { + case PATTERN_TEST: + ret = sxevf_reg_pattern_test(hw, + test->reg + (i * 0x40), + test->mask, test->write); + break; + case TABLE32_TEST: + ret = sxevf_reg_pattern_test(hw, + test->reg + (i * 4), + test->mask, test->write); + break; + case TABLE64_TEST_LO: + ret = sxevf_reg_pattern_test(hw, + test->reg + (i * 8), + test->mask, test->write); + break; + case 
TABLE64_TEST_HI: + ret = sxevf_reg_pattern_test(hw, + (test->reg + 4) + (i * 8), + test->mask, test->write); + break; + case SET_READ_TEST: + ret = sxevf_reg_set_and_check(hw, + test->reg + (i * 0x40), + test->mask, test->write); + break; + case WRITE_NO_TEST: + SXEVF_REG_WRITE(hw, test->reg + (i * 0x40), + test->write); + break; + default: + LOG_ERROR_BDF("reg test mod err, type=%d\n", + test->test_type); + break; + } + + if (ret) { + goto l_end; + } + + } + test++; + } + +l_end: + return ret; +} + +static const struct sxevf_setup_operations sxevf_setup_ops = { + .reset = sxevf_hw_reset, + .hw_stop = sxevf_hw_stop, + .regs_test = sxevf_regs_test, + .regs_dump = sxevf_reg_dump, + .link_state_get = sxevf_link_state_get, + .reset_done = sxevf_hw_rst_done, +}; + +static void sxevf_tx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len, + u64 desc_dma_addr, u8 reg_idx) +{ + SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx), (desc_dma_addr & \ + DMA_BIT_MASK(32))); + SXEVF_REG_WRITE(hw, SXEVF_TDBAH(reg_idx), (desc_dma_addr >> 32)); + SXEVF_REG_WRITE(hw, SXEVF_TDLEN(reg_idx), desc_mem_len); + SXEVF_REG_WRITE(hw, SXEVF_TDH(reg_idx), 0); + SXEVF_REG_WRITE(hw, SXEVF_TDT(reg_idx), 0); + + return; +} + +static void sxevf_tx_writeback_off(struct sxevf_hw *hw, u8 reg_idx) +{ + SXEVF_REG_WRITE(hw, SXEVF_TDWBAH(reg_idx), 0); + SXEVF_REG_WRITE(hw, SXEVF_TDWBAL(reg_idx), 0); + + return; +} + +static void sxevf_tx_desc_thresh_set( + struct sxevf_hw *hw, + u8 reg_idx, + u32 wb_thresh, + u32 host_thresh, + u32 prefech_thresh) +{ + u32 txdctl = 0; + + txdctl |= (wb_thresh << SXEVF_TXDCTL_WTHRESH_SHIFT); + txdctl |= (host_thresh << SXEVF_TXDCTL_HTHRESH_SHIFT) | + prefech_thresh; + + SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl); + + return; +} + +void sxevf_tx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on) +{ + u32 wait_loop = SXEVF_MAX_TXRX_DESC_POLL; + struct sxevf_adapter *adapter = hw->adapter; + + u32 txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx)); + if (is_on) { + txdctl |= SXEVF_TXDCTL_ENABLE; + SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl); + + do { + usleep_range(1000, 2000); + txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx)); + } while (--wait_loop && !(txdctl & SXEVF_TXDCTL_ENABLE)); + } else { + txdctl &= ~SXEVF_TXDCTL_ENABLE; + SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl); + + do { + usleep_range(1000, 2000); + txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx)); + } while (--wait_loop && (txdctl & SXEVF_TXDCTL_ENABLE)); + } + + if (!wait_loop) { + LOG_DEV_ERR("tx ring %u switch %u failed within " + "the polling period\n", reg_idx, is_on); + } + + return; +} + +static void sxevf_rx_disable(struct sxevf_hw *hw, u8 reg_idx) +{ + u32 rxdctl; + u32 wait_loop = SXEVF_RX_RING_POLL_MAX; + struct sxevf_adapter *adapter = hw->adapter; + + if (!hw->reg_base_addr) { + goto l_end; + } + + rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx)); + rxdctl &= ~SXE_VFRXDCTL_ENABLE; + SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl); + + do { + udelay(10); + rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx)); + } while (--wait_loop && (rxdctl & SXE_VFRXDCTL_ENABLE)); + + if (!wait_loop) { + LOG_ERROR_BDF("RXDCTL.ENABLE queue %d not cleared while polling\n", + reg_idx); + } + +l_end: + return; +} + +void sxevf_rx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on) +{ + u32 rxdctl; + u32 wait_loop = SXEVF_RING_WAIT_LOOP; + struct sxevf_adapter *adapter = hw->adapter; + + rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx)); + if (is_on) { + rxdctl |= SXEVF_RXDCTL_ENABLE | SXEVF_RXDCTL_VME; 
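+		/*
+		 * After setting the enable bit, the loop below polls RXDCTL until
+		 * hardware reflects it back: up to SXEVF_RING_WAIT_LOOP iterations
+		 * with a 1-2 ms sleep each (roughly 100-200 ms worst case) before
+		 * the failure message at the end of this function is logged.
+		 */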
+ SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl); + + do { + usleep_range(1000, 2000); + rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx)); + } while (--wait_loop && !(rxdctl & SXEVF_RXDCTL_ENABLE)); + } else { + rxdctl &= ~SXEVF_RXDCTL_ENABLE; + SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_idx), rxdctl); + + do { + usleep_range(1000, 2000); + rxdctl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_idx)); + } while (--wait_loop && (rxdctl & SXEVF_RXDCTL_ENABLE)); + } + + SXEVF_WRITE_FLUSH(hw); + + if (!wait_loop) { + LOG_DEV_ERR("rx ring %u switch %u failed within " + "the polling period\n", reg_idx, is_on); + } + + return; +} + +void sxevf_rx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len, + u64 desc_dma_addr, u8 reg_idx) +{ + SXEVF_REG_WRITE(hw, SXE_VFRDBAL(reg_idx), + (desc_dma_addr & DMA_BIT_MASK(32))); + SXEVF_REG_WRITE(hw, SXE_VFRDBAH(reg_idx), (desc_dma_addr >> 32)); + SXEVF_REG_WRITE(hw, SXE_VFRDLEN(reg_idx), desc_mem_len); + + SXEVF_WRITE_FLUSH(hw); + + SXEVF_REG_WRITE(hw, SXE_VFRDH(reg_idx), 0); + SXEVF_REG_WRITE(hw, SXE_VFRDT(reg_idx), 0); + + return; +} + +void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx, + u32 header_buf_len, u32 pkg_buf_len, bool drop_en) +{ + u32 srrctl = 0; + + if (drop_en) { + srrctl = SXEVF_SRRCTL_DROP_EN; + } + + srrctl |= ((header_buf_len << SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT) & + SXEVF_SRRCTL_BSIZEHDR_MASK); + srrctl |= ((pkg_buf_len >> SXEVF_SRRCTL_BSIZEPKT_SHIFT) & + SXEVF_SRRCTL_BSIZEPKT_MASK); + + SXEVF_REG_WRITE(hw, SXE_VFSRRCTL(reg_idx), srrctl); + + return; +} + +static void sxevf_tx_ring_info_get(struct sxevf_hw *hw, + u8 idx, u32 *head, u32 *tail) +{ + *head = SXEVF_REG_READ(hw, SXE_VFTDH(idx)); + *tail = SXEVF_REG_READ(hw, SXE_VFTDT(idx)); + + return; +} + +static const struct sxevf_dma_operations sxevf_dma_ops = { + .tx_ring_desc_configure = sxevf_tx_ring_desc_configure, + .tx_writeback_off = sxevf_tx_writeback_off, + .tx_desc_thresh_set = sxevf_tx_desc_thresh_set, + .tx_ring_switch = sxevf_tx_ring_switch, + .tx_ring_info_get = sxevf_tx_ring_info_get, + + .rx_disable = sxevf_rx_disable, + .rx_ring_switch = sxevf_rx_ring_switch, + .rx_ring_desc_configure= sxevf_rx_ring_desc_configure, + .rx_rcv_ctl_configure = sxevf_rx_rcv_ctl_configure, +}; + +#ifdef SXE_DPDK +#define SXEVF_32BIT_COUNTER_UPDATE(reg, last, cur) \ + { \ + u32 latest = SXEVF_REG_READ(hw, reg); \ + cur += (latest - last) & UINT_MAX; \ + last = latest; \ + } + +#define SXEVF_36BIT_COUNTER_UPDATE(lsb, msb, last, cur) \ + { \ + u64 new_lsb = SXEVF_REG_READ(hw, lsb); \ + u64 new_msb = SXEVF_REG_READ(hw, msb); \ + u64 latest = ((new_msb << 32) | new_lsb); \ + cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \ + last = latest; \ + } + +#else +#define SXEVF_32BIT_COUNTER_UPDATE(reg, last_counter, counter) \ + { \ + u32 current_counter = SXEVF_REG_READ(hw, reg); \ + if (current_counter < last_counter) \ + counter += 0x100000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFFF00000000LL; \ + counter |= current_counter; \ + } + +#define SXEVF_36BIT_COUNTER_UPDATE(reg_lsb, reg_msb, last_counter, counter) \ + { \ + u64 current_counter_lsb = SXEVF_REG_READ(hw, reg_lsb); \ + u64 current_counter_msb = SXEVF_REG_READ(hw, reg_msb); \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ + if (current_counter < last_counter) \ + counter += 0x1000000000LL; \ + last_counter = current_counter; \ + counter &= 0xFFFFFFF000000000LL; \ + counter |= current_counter; \ + } +#endif + +void sxevf_packet_stats_get(struct sxevf_hw *hw, + struct 
sxevf_hw_stats *stats) +{ + SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFGPRC, stats->last_vfgprc, + stats->vfgprc); + SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFGPTC, stats->last_vfgptc, + stats->vfgptc); + SXEVF_36BIT_COUNTER_UPDATE(SXEVF_VFGORC_LSB, SXEVF_VFGORC_MSB, + stats->last_vfgorc, + stats->vfgorc); + SXEVF_36BIT_COUNTER_UPDATE(SXEVF_VFGOTC_LSB, SXEVF_VFGOTC_MSB, + stats->last_vfgotc, + stats->vfgotc); + SXEVF_32BIT_COUNTER_UPDATE(SXEVF_VFMPRC, stats->last_vfmprc, + stats->vfmprc); + + return; +} + +void sxevf_stats_init_value_get(struct sxevf_hw *hw, + struct sxevf_hw_stats *stats) +{ + stats->last_vfgprc = SXEVF_REG_READ(hw, SXE_VFGPRC); + stats->last_vfgorc = SXEVF_REG_READ(hw, SXE_VFGORC_LSB); + stats->last_vfgorc |= (((u64)(SXEVF_REG_READ(hw, SXE_VFGORC_MSB))) << 32); + stats->last_vfgptc = SXEVF_REG_READ(hw, SXE_VFGPTC); + stats->last_vfgotc = SXEVF_REG_READ(hw, SXE_VFGOTC_LSB); + stats->last_vfgotc |= (((u64)(SXEVF_REG_READ(hw, SXE_VFGOTC_MSB))) << 32); + stats->last_vfmprc = SXEVF_REG_READ(hw, SXE_VFMPRC); + + return; +} +static const struct sxevf_stat_operations sxevf_stat_ops = { + .packet_stats_get = sxevf_packet_stats_get, + .stats_init_value_get = sxevf_stats_init_value_get, +}; + +static void sxevf_rx_max_used_ring_set(struct sxevf_hw *hw, u16 max_rx_ring) +{ + u32 rqpl = 0; + + if (max_rx_ring > 1) { + rqpl |= BIT(29); + } + + SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, rqpl); + + return; +} + +static const struct sxevf_dbu_operations sxevf_dbu_ops = { + .rx_max_used_ring_set = sxevf_rx_max_used_ring_set, +}; + +static const struct sxevf_mbx_operations sxevf_mbx_ops = { + + .mailbox_read = sxevf_mailbox_read, + .mailbox_write = sxevf_mailbox_write, + + .msg_write = sxevf_msg_write, + .msg_read = sxevf_msg_read, + + .pf_req_irq_trigger = sxevf_pf_req_irq_trigger, + .pf_ack_irq_trigger = sxevf_pf_ack_irq_trigger, +}; + +void sxevf_hw_ops_init(struct sxevf_hw *hw) +{ + hw->setup.ops = &sxevf_setup_ops; + hw->irq.ops = &sxevf_irq_ops; + hw->mbx.ops = &sxevf_mbx_ops; + hw->dma.ops = &sxevf_dma_ops; + hw->stat.ops = &sxevf_stat_ops; + hw->dbu.ops = &sxevf_dbu_ops; + + return; +} + +#ifdef SXE_DPDK + +#define SXEVF_RSS_FIELD_MASK 0xffff0000 +#define SXEVF_MRQC_RSSEN 0x00000001 + +#define SXEVF_RSS_KEY_SIZE (40) +#define SXEVF_MAX_RSS_KEY_ENTRIES (10) +#define SXEVF_MAX_RETA_ENTRIES (128) + +void sxevf_rxtx_reg_init(struct sxevf_hw *hw) +{ + int i; + u32 vfsrrctl; + + vfsrrctl = 0x100 << SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT; + vfsrrctl |= 0x800 >> SXEVF_SRRCTL_BSIZEPKT_SHIFT; + + SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, 0); + + for (i = 0; i < 7; i++) { + SXEVF_REG_WRITE(hw, SXE_VFRDH(i), 0); + SXEVF_REG_WRITE(hw, SXE_VFRDT(i), 0); + SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(i), 0); + SXEVF_REG_WRITE(hw, SXE_VFSRRCTL(i), vfsrrctl); + SXEVF_REG_WRITE(hw, SXE_VFTDH(i), 0); + SXEVF_REG_WRITE(hw, SXE_VFTDT(i), 0); + SXEVF_REG_WRITE(hw, SXE_VFTXDCTL(i), 0); + SXEVF_REG_WRITE(hw, SXE_VFTDWBAH(i), 0); + SXEVF_REG_WRITE(hw, SXE_VFTDWBAL(i), 0); + } + + SXEVF_WRITE_FLUSH(hw); + + return; +} + +u32 sxevf_irq_cause_get(struct sxevf_hw *hw) +{ + return SXEVF_REG_READ(hw, SXE_VFEICR); +} + +void sxevf_tx_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len, + u64 desc_dma_addr, u8 reg_idx) +{ + + SXEVF_REG_WRITE(hw, SXEVF_TDBAL(reg_idx), (desc_dma_addr & \ + DMA_BIT_MASK(32))); + SXEVF_REG_WRITE(hw, SXEVF_TDBAH(reg_idx), (desc_dma_addr >> 32)); + SXEVF_REG_WRITE(hw, SXEVF_TDLEN(reg_idx), desc_mem_len); + SXEVF_REG_WRITE(hw, SXEVF_TDH(reg_idx), 0); + SXEVF_REG_WRITE(hw, SXEVF_TDT(reg_idx), 0); + + return; +} + +void 
sxevf_rss_bit_num_set(struct sxevf_hw *hw, u32 value) +{ + SXEVF_REG_WRITE(hw, SXE_VFPSRTYPE, value); + + return; +} + +void sxevf_hw_vlan_tag_strip_switch(struct sxevf_hw *hw, + u16 reg_index, bool is_enable) +{ + u32 vlnctrl; + + vlnctrl = SXEVF_REG_READ(hw, SXE_VFRXDCTL(reg_index)); + + if (is_enable) { + vlnctrl |= SXEVF_RXDCTL_VME; + } else { + vlnctrl &= ~SXEVF_RXDCTL_VME; + } + + SXEVF_REG_WRITE(hw, SXE_VFRXDCTL(reg_index), vlnctrl); + + return; +} + +void sxevf_tx_queue_thresh_set(struct sxevf_hw *hw, u8 reg_idx, + u32 prefech_thresh, u32 host_thresh, u32 wb_thresh) +{ + u32 txdctl = SXEVF_REG_READ(hw, SXEVF_TXDCTL(reg_idx)); + + txdctl |= (prefech_thresh & SXEVF_TXDCTL_THRESH_MASK); + txdctl |= ((host_thresh & SXEVF_TXDCTL_THRESH_MASK) << SXEVF_TXDCTL_HTHRESH_SHIFT); + txdctl |= ((wb_thresh & SXEVF_TXDCTL_THRESH_MASK)<< SXEVF_TXDCTL_WTHRESH_SHIFT); + + SXEVF_REG_WRITE(hw, SXEVF_TXDCTL(reg_idx), txdctl); + + return; +} + +void sxevf_rx_desc_tail_set(struct sxevf_hw *hw, u8 reg_idx, u32 value) +{ + SXEVF_REG_WRITE(hw, SXE_VFRDT(reg_idx), value); + + return; +} + +u32 sxevf_hw_rss_redir_tbl_get(struct sxevf_hw *hw, u16 reg_idx) +{ + return SXEVF_REG_READ(hw, SXE_VFRETA(reg_idx >> 2)); +} + +void sxevf_hw_rss_redir_tbl_set(struct sxevf_hw *hw, + u16 reg_idx, u32 value) +{ + SXEVF_REG_WRITE(hw, SXE_VFRETA(reg_idx >> 2), value); + return; +} + +u32 sxevf_hw_rss_key_get(struct sxevf_hw *hw, u8 reg_idx) +{ + u32 rss_key; + + if (reg_idx >= SXEVF_MAX_RSS_KEY_ENTRIES) { + rss_key = 0; + } else { + rss_key = SXEVF_REG_READ(hw, SXE_VFRSSRK(reg_idx)); + } + + return rss_key; +} + +u32 sxevf_hw_rss_field_get(struct sxevf_hw *hw) +{ + u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC); + return (mrqc & SXEVF_RSS_FIELD_MASK); +} + +bool sxevf_hw_is_rss_enabled(struct sxevf_hw *hw) +{ + bool rss_enable = false; + u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC); + if (mrqc & SXEVF_MRQC_RSSEN) { + rss_enable = true; + } + + return rss_enable; +} + +void sxevf_hw_rss_key_set_all(struct sxevf_hw *hw, u32 *rss_key) +{ + u32 i; + + for (i = 0; i < SXEVF_MAX_RSS_KEY_ENTRIES; i++) { + SXEVF_REG_WRITE(hw, SXE_VFRSSRK(i), rss_key[i]); + } + + return; +} + +void sxevf_hw_rss_cap_switch(struct sxevf_hw *hw, bool is_on) +{ + u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC); + if (is_on) { + mrqc |= SXEVF_MRQC_RSSEN; + } else { + mrqc &= ~SXEVF_MRQC_RSSEN; + } + + SXEVF_REG_WRITE(hw, SXE_VFMRQC, mrqc); + + return; +} + +void sxevf_hw_rss_field_set(struct sxevf_hw *hw, u32 rss_field) +{ + u32 mrqc = SXEVF_REG_READ(hw, SXE_VFMRQC); + + mrqc &= ~SXEVF_RSS_FIELD_MASK; + mrqc |= rss_field; + SXEVF_REG_WRITE(hw, SXE_VFMRQC, mrqc); + + return; +} + +u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw, + const struct sxevf_reg_info *regs, + u32 *reg_buf) +{ + u32 j, i = 0; + int count = 0; + + while (regs[i].count) { + for (j = 0; j < regs[i].count; j++) { + reg_buf[count + j] = SXEVF_REG_READ(hw, + regs[i].addr + j * regs[i].stride); + LOG_INFO("regs= %s, regs_addr=%x, regs_value=%04x\n", + regs[i].name , regs[i].addr, reg_buf[count + j]); + } + + i++; + count += j; + } + + return count; +}; + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_hw.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_hw.h new file mode 100644 index 000000000000..2c4823fab865 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_hw.h @@ -0,0 +1,348 @@ + +#ifndef __SXEVF_HW_H__ +#define __SXEVF_HW_H__ + +#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST) +#include +#include +#include +#else +#include "sxe_compat_platform.h" 
+#ifdef SXE_HOST_DRIVER +#include "sxe_drv_type.h" +#endif +#endif + +#include "sxevf_regs.h" + +#if defined (__KERNEL__) || defined (SXE_KERNEL_TEST) +#define SXE_PRIU64 "llu" +#define SXE_PRIX64 "llx" +#define SXE_PRID64 "lld" +#else +#define SXE_PRIU64 PRIu64 +#define SXE_PRIX64 PRIx64 +#define SXE_PRID64 PRId64 +#endif + +#define SXEVF_TXRX_RING_NUM_MAX 8 +#define SXEVF_MAX_TXRX_DESC_POLL (10) +#define SXEVF_TX_DESC_PREFETCH_THRESH_32 (32) +#define SXEVF_TX_DESC_HOST_THRESH_1 (1) +#define SXEVF_TX_DESC_WRITEBACK_THRESH_8 (8) +#define SXEVF_TXDCTL_HTHRESH_SHIFT (8) +#define SXEVF_TXDCTL_WTHRESH_SHIFT (16) + +#define SXEVF_TXDCTL_THRESH_MASK (0x7F) + +#define SXEVF_RX_RING_POLL_MAX (10) + +#define SXEVF_MAC_HDR_LEN_MAX (127) +#define SXEVF_NETWORK_HDR_LEN_MAX (511) + +#define SXEVF_LINK_SPEED_UNKNOWN 0 +#define SXEVF_LINK_SPEED_1GB_FULL 0x0020 +#define SXEVF_LINK_SPEED_10GB_FULL 0x0080 +#define SXEVF_LINK_SPEED_100_FULL 0x0008 + +#define SXEVF_VFT_TBL_SIZE (128) +#define SXEVF_HW_TXRX_RING_NUM_MAX (128) + +#define SXEVF_VLAN_TAG_SIZE (4) + +#define SXEVF_HW_UC_ENTRY_NUM_MAX 128 + +enum { + SXEVF_LINK_TO_PHY = 0, + SXEVF_LINK_TO_DOWN, + SXEVF_LINK_TO_REINIT, +}; + +enum { + SXEVF_DIAG_TEST_PASSED = 0, + SXEVF_DIAG_TEST_BLOCKED = 1, + SXEVF_DIAG_REG_PATTERN_TEST_ERR = 2, + SXEVF_DIAG_CHECK_REG_TEST_ERR = 3, +}; + +struct sxevf_hw; + +struct sxevf_hw_stats { + u64 base_vfgprc; + u64 base_vfgptc; + u64 base_vfgorc; + u64 base_vfgotc; + u64 base_vfmprc; + + u64 last_vfgprc; + u64 last_vfgptc; + u64 last_vfgorc; + u64 last_vfgotc; + u64 last_vfmprc; + + u64 vfgprc; + u64 vfgptc; + u64 vfgorc; + u64 vfgotc; + u64 vfmprc; + + u64 saved_reset_vfgprc; + u64 saved_reset_vfgptc; + u64 saved_reset_vfgorc; + u64 saved_reset_vfgotc; + u64 saved_reset_vfmprc; +}; + +void sxevf_hw_ops_init(struct sxevf_hw *hw); + + +struct sxevf_setup_operations { + void (*reset)(struct sxevf_hw *); + void (*hw_stop)(struct sxevf_hw *hw); + s32 (*regs_test)(struct sxevf_hw *hw); + u32 (*link_state_get)(struct sxevf_hw *hw); + u32 (*regs_dump)(struct sxevf_hw *hw, u32 *regs_buff, u32 buf_size); + bool (*reset_done)(struct sxevf_hw *); +}; + +struct sxevf_hw_setup { + const struct sxevf_setup_operations *ops; +}; + +struct sxevf_irq_operations { + void (*pending_irq_clear)(struct sxevf_hw *hw); + void (*ring_irq_interval_set)(struct sxevf_hw *hw, u16 irq_idx, u32 interval); + void (*event_irq_interval_set)(struct sxevf_hw * hw, u16 irq_idx, u32 value); + void (*ring_irq_map)(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 irq_idx); + void (*event_irq_map)(struct sxevf_hw *hw, u16 irq_idx); + void (*ring_irq_trigger)(struct sxevf_hw *hw, u64 eics); + void (*irq_enable)(struct sxevf_hw * hw, u32 mask); + void (*specific_irq_enable)(struct sxevf_hw * hw, u32 value); + void (*irq_disable)(struct sxevf_hw *hw); + void (*irq_off)(struct sxevf_hw *hw); +}; + +struct sxevf_irq_info { + const struct sxevf_irq_operations *ops; +}; + +struct sxevf_mbx_operations { + + u32 (*mailbox_read)(struct sxevf_hw *hw); + void (*mailbox_write)(struct sxevf_hw *hw, u32 value); + + void (*msg_write)(struct sxevf_hw *hw, u8 index, u32 msg); + u32 (*msg_read)(struct sxevf_hw *hw, u8 index); + + void (*pf_req_irq_trigger)(struct sxevf_hw *hw); + void (*pf_ack_irq_trigger)(struct sxevf_hw *hw); +}; + +struct sxevf_mbx_stats { + u32 send_msgs; + u32 rcv_msgs; + + u32 reqs; + u32 acks; + u32 rsts; +}; + +struct sxevf_mbx_info { + const struct sxevf_mbx_operations *ops; + + struct sxevf_mbx_stats stats; + u32 msg_len; + u32 retry; + u32 interval; 
+ u32 reg_value; + u32 api_version; +}; + +struct sxevf_dma_operations { + void (* tx_ring_desc_configure)(struct sxevf_hw *, u32, u64, u8); + void (* tx_writeback_off)(struct sxevf_hw *, u8); + void (* tx_desc_thresh_set)(struct sxevf_hw *, u8, u32, u32, u32); + void (* tx_ring_switch)(struct sxevf_hw *, u8, bool); + void (* tx_desc_wb_flush)(struct sxevf_hw *, u8); + void (* tx_ring_info_get)(struct sxevf_hw *hw, u8 reg_idx, + u32 *head, u32 *tail); + void (* rx_disable)(struct sxevf_hw *, u8); + void (* rx_ring_switch)(struct sxevf_hw *, u8, bool); + void (* rx_ring_desc_configure)(struct sxevf_hw *, u32, u64, u8); + void (* rx_rcv_ctl_configure)(struct sxevf_hw *hw, u8 reg_idx, + u32 header_buf_len, u32 pkg_buf_len, bool drop_en); +}; + +struct sxevf_dma_info { + const struct sxevf_dma_operations *ops; +}; + +struct sxevf_stat_operations { + void (*packet_stats_get)(struct sxevf_hw *, + struct sxevf_hw_stats *); + void (*stats_init_value_get)(struct sxevf_hw *hw, + struct sxevf_hw_stats *stats); +}; + +struct sxevf_stat_info { + const struct sxevf_stat_operations *ops; +}; + +struct sxevf_dbu_operations { + void (*rx_max_used_ring_set)(struct sxevf_hw *, u16); + +}; + +struct sxevf_dbu_info { + const struct sxevf_dbu_operations *ops; +}; + +enum sxevf_hw_state { + SXEVF_HW_STOP, + SXEVF_HW_FAULT, +}; + +struct sxevf_hw { + u8 __iomem *reg_base_addr; + void *adapter; + + void *priv; + unsigned long state; + void (*fault_handle)(void *priv); + u32 (*reg_read)(const volatile void *reg); + void (*reg_write)(u32 value, volatile void *reg); + s32 board_type; + + struct sxevf_hw_setup setup; + struct sxevf_irq_info irq; + struct sxevf_mbx_info mbx; + + struct sxevf_dma_info dma; + struct sxevf_stat_info stat; + struct sxevf_dbu_info dbu; +}; + +struct sxevf_reg_info { + u32 addr; + u32 count; + u32 stride; + const s8 *name; +}; + +u16 sxevf_reg_dump_num_get(void); + +void sxevf_hw_fault_handle(struct sxevf_hw *hw); + +static inline bool sxevf_is_hw_fault(struct sxevf_hw *hw) +{ + return test_bit(SXEVF_HW_FAULT, &hw->state); +} + +static inline void sxevf_hw_fault_handle_init(struct sxevf_hw *hw, + void (*handle)(void *), void *priv) +{ + hw->priv = priv; + hw->fault_handle = handle; + + return; +} + +static inline void sxevf_hw_reg_handle_init(struct sxevf_hw *hw, + u32 (*read)(const volatile void *), + void (*write)(u32, volatile void *)) +{ + hw->reg_read = read; + hw->reg_write = write; + + return; +} + +#ifdef SXE_DPDK + +void sxevf_irq_disable(struct sxevf_hw *hw); + +void sxevf_hw_stop(struct sxevf_hw *hw); + +void sxevf_hw_reset(struct sxevf_hw *hw); + +void sxevf_msg_write(struct sxevf_hw *hw, u8 index, u32 msg); + +u32 sxevf_msg_read(struct sxevf_hw *hw, u8 index); + +u32 sxevf_mailbox_read(struct sxevf_hw *hw); + +void sxevf_mailbox_write(struct sxevf_hw *hw, u32 value); + +void sxevf_pf_req_irq_trigger(struct sxevf_hw *hw); + +void sxevf_pf_ack_irq_trigger(struct sxevf_hw *hw); + +void sxevf_rxtx_reg_init(struct sxevf_hw *hw); + +void sxevf_irq_enable(struct sxevf_hw *hw, u32 mask); + +u32 sxevf_irq_cause_get(struct sxevf_hw *hw); + +void sxevf_event_irq_map(struct sxevf_hw *hw, u16 vector); + +void sxevf_hw_ring_irq_map(struct sxevf_hw *hw, bool is_tx, u16 hw_ring_idx, u16 vector); + +void sxevf_ring_irq_interval_set(struct sxevf_hw *hw, u16 irq_idx, u32 interval); + +void sxevf_tx_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len, + u64 desc_dma_addr, u8 reg_idx); + +void sxevf_rx_ring_desc_configure(struct sxevf_hw *hw, u32 desc_mem_len, + u64 desc_dma_addr, u8 reg_idx); + 
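+/*
+ * A minimal bring-up sketch for these accessors, assuming caller-provided
+ * register callbacks sxevf_read32/sxevf_write32 and a fault callback
+ * sxevf_fault_cb (placeholder names, not defined in this driver):
+ *
+ *	sxevf_hw_reg_handle_init(hw, sxevf_read32, sxevf_write32);
+ *	sxevf_hw_fault_handle_init(hw, sxevf_fault_cb, priv);
+ *	sxevf_hw_ops_init(hw);
+ *	hw->setup.ops->reset(hw);
+ *
+ * In the SXE_DPDK build the same routines are also reachable directly through
+ * the prototypes in this block rather than only through the ops tables.
+ */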
+void sxevf_rx_rcv_ctl_configure(struct sxevf_hw *hw, u8 reg_idx, + u32 header_buf_len, u32 pkg_buf_len, + bool drop_en); + +void sxevf_rss_bit_num_set(struct sxevf_hw *hw, u32 value); + +void sxevf_hw_vlan_tag_strip_switch(struct sxevf_hw *hw, + u16 reg_index, bool is_enable); + +void sxevf_tx_queue_thresh_set(struct sxevf_hw *hw, u8 reg_idx, + u32 prefech_thresh, u32 host_thresh, u32 wb_thresh); + +void sxevf_tx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on); + +void sxevf_rx_ring_switch(struct sxevf_hw *hw, u8 reg_idx, bool is_on); + +void sxevf_rx_desc_tail_set(struct sxevf_hw *hw, u8 reg_idx, u32 value); + +void sxevf_specific_irq_enable(struct sxevf_hw *hw, u32 value); + +void sxevf_packet_stats_get(struct sxevf_hw *hw, + struct sxevf_hw_stats *stats); + +void sxevf_stats_init_value_get(struct sxevf_hw *hw, + struct sxevf_hw_stats *stats); + +u32 sxevf_hw_rss_redir_tbl_get(struct sxevf_hw *hw, u16 reg_idx); + +void sxevf_hw_rss_redir_tbl_set(struct sxevf_hw *hw, + u16 reg_idx, u32 value); + +u32 sxevf_hw_rss_key_get(struct sxevf_hw *hw, u8 reg_idx); + +u32 sxevf_hw_rss_field_get(struct sxevf_hw *hw); + +void sxevf_hw_rss_field_set(struct sxevf_hw *hw, u32 rss_field); + +void sxevf_hw_rss_cap_switch(struct sxevf_hw *hw, bool is_on); + +void sxevf_hw_rss_key_set_all(struct sxevf_hw *hw, u32 *rss_key); + +bool sxevf_hw_is_rss_enabled(struct sxevf_hw *hw); + +u32 sxevf_link_state_get(struct sxevf_hw *hw); + +u32 sxevf_hw_regs_group_read(struct sxevf_hw *hw, + const struct sxevf_reg_info *regs, + u32 *reg_buf); + +#endif +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ipsec.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ipsec.c new file mode 100644 index 000000000000..bf1bfa9534ab --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ipsec.c @@ -0,0 +1,824 @@ +#ifdef SXE_IPSEC_CONFIGURE +#include +#include +#include + +#include "sxevf_ipsec.h" +#include "sxevf_tx_proc.h" +#include "sxevf_ring.h" +#include "sxevf_msg.h" + +static const char ipsec_aes_name[] = "rfc4106(gcm(aes))"; + +s32 sxevf_ipsec_sa_add(struct sxevf_adapter *adapter, + struct xfrm_state *xs, u32 *pf_sa_idx) +{ + struct sxevf_ipsec_add_msg msg = {}; + struct sxevf_hw *hw = &adapter->hw; + s32 ret; + + msg.msg_type = SXEVF_IPSEC_ADD; + + msg.flags = xs->xso.flags; + msg.spi = xs->id.spi; + msg.proto = xs->id.proto; + msg.family = xs->props.family; + + if (xs->props.family == AF_INET6) + memcpy(msg.addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6)); + else + memcpy(msg.addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4)); + + memcpy(msg.key, xs->aead->alg_key, sizeof(msg.key)); + + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, + SXEVF_MSG_NUM(sizeof(msg))); + if (!ret && (msg.msg_type == (SXEVF_IPSEC_ADD | SXEVF_MSGTYPE_ACK))) { + if (pf_sa_idx) { + *pf_sa_idx = msg.pf_sa_idx; + } + LOG_INFO("xfrm state flags:0x%x spi:0x%x proto:0x%x " + "family:0x%x add to pf_sa_idx:%u\n", + xs->xso.flags, xs->id.spi, + xs->id.proto, xs->props.family, + msg.pf_sa_idx); + } else { + LOG_ERROR("xfrm state flags:0x%x spi:0x%x proto:0x%x " + "family:0x%x add to pf fail.(err:%d)\n", + xs->xso.flags, xs->id.spi, + xs->id.proto, xs->props.family, ret); + } + + return ret; +} + +s32 sxevf_ipsec_sa_del(struct sxevf_adapter *adapter, u32 pf_sa_idx) +{ + struct sxevf_ipsec_del_msg msg = {}; + struct sxevf_hw *hw = &adapter->hw; + s32 ret; + + msg.msg_type = SXEVF_IPSEC_DEL; + msg.sa_idx = pf_sa_idx; + + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, + SXEVF_MSG_NUM(sizeof(msg))); + if (ret) { + 
LOG_ERROR("del pf sa:%d fail.(err:%d)\n", pf_sa_idx, ret); + goto l_end; + } + +l_end: + return ret; +} + +STATIC inline bool sxevf_need_tx_ipsec_offload(struct sk_buff *skb) +{ + struct sec_path *sp = skb->sp; + bool ret = true; + + if (!sp || !sp->olen || (sp->len != sp->olen)) { + ret = false; + } + + return ret; +} + +STATIC struct xfrm_state *sxevf_ipsec_rx_sa_match(struct sxevf_ipsec_context *ipsec, + __be32 spi, u8 proto, __be32 *daddr, u8 daddr_len) +{ + struct sxevf_rx_sa *sa = NULL; + struct xfrm_state *xs = NULL; + + rcu_read_lock(); + hash_for_each_possible_rcu(ipsec->rx_table_list, sa, hlist, + (__force u32) spi) { + if (spi == sa->xs->id.spi && + proto == sa->xs->id.proto && + !memcmp(daddr, &(sa->xs->id.daddr), daddr_len)) { + xs = sa->xs; + xfrm_state_hold(xs); + break; + } + } + rcu_read_unlock(); + + return xs; +} + +static s32 sxevf_ipsec_tx_offload_param_valid(struct sk_buff *skb, + struct sxevf_tx_sa *sa, + u32 *vf_sa_idx, struct xfrm_state **xfrm_state) +{ + s32 ret = -SXEVF_ERR_ARGUMENT_INVALID; + u32 idx; + struct sec_path *path; + struct xfrm_state *xs; + + path = skb_sec_path(skb); + if (unlikely(!path->len)) { + LOG_DEV_ERR("security path len:0 invalid.\n"); + goto l_out; + } + + xs = xfrm_input_state(skb); + if (unlikely(!xs)) { + LOG_DEV_ERR("security input xs NULL.\n"); + goto l_out; + } + + *xfrm_state = xs; + idx = xs->xso.offload_handle - SXEVF_IPSEC_TX_INDEX_BASE; + if (idx >= SXEVF_IPSEC_SA_CNT_MAX) { + LOG_DEV_ERR("invalid offload_handle:%lu idx:%d.\n", + xs->xso.offload_handle, idx); + goto l_out; + } + + if (!test_bit(SXEVF_IPSEC_SA_ENTRY_USED, &sa[idx].status)) { + LOG_DEV_ERR("tx_table[%d] not used.\n", idx); + goto l_out; + } + + *vf_sa_idx = idx; + + LOG_INFO("vf_sa_idx:%u tx ipsec offload valid passed\n", + *vf_sa_idx); + ret = 0; + +l_out: + return ret; +} + +s32 sxevf_tx_ipsec_offload(struct sxevf_ring *tx_ring, + struct sxevf_tx_buffer *first, + struct sxevf_tx_context_desc *ctxt_desc) +{ + u32 vf_sa_idx; + s32 ret = 0; + struct sxevf_adapter *adapter = netdev_priv(tx_ring->netdev); + struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + struct sxevf_tx_sa *sa = ipsec->tx_table; + struct xfrm_state *xfrm_state = NULL; + u32 tucmd_ipsec = 0; + + if (!sxevf_need_tx_ipsec_offload(first->skb)) { + LOG_DEBUG("ring[%u] no need offload IPsec.\n", tx_ring->idx); + goto l_out; + } + + ret = sxevf_ipsec_tx_offload_param_valid(first->skb, sa, &vf_sa_idx, &xfrm_state); + if (ret) { + LOG_ERROR("ring[%d ]tx ipsec valid failed.\n", tx_ring->idx); + goto l_out; + } + + first->tx_features |= SXEVF_TX_FEATURE_IPSEC | SXEVF_TX_FEATURE_CSUM; + + if (xfrm_state->id.proto == IPPROTO_ESP) { + tucmd_ipsec = SXEVF_TX_CTXTD_TUCMD_IPSEC_TYPE_ESP | + SXEVF_TX_CTXTD_TUCMD_L4T_TCP; + if (first->protocol == htons(ETH_P_IP)) { + tucmd_ipsec |= SXEVF_TX_CTXTD_TUCMD_IPV4; + } + + if (!skb_is_gso(first->skb)) { + const u32 auth_len = SXEVF_IPSEC_AUTH_BIT_LEN / CHAR_BITS; + u8 pad_len; + + ret = skb_copy_bits(first->skb, first->skb->len - SXEVF_IPSEC_PADLEN_OFFSET, + &pad_len, SXEVF_IPSEC_PADLEN_BYTE); + if (unlikely(ret)) { + LOG_ERROR("auth_len:%d offset:%d copy skb " + "failed.(err:%d)\n", + auth_len, + first->skb->len - SXEVF_IPSEC_PADLEN_OFFSET, + ret); + goto l_out; + } + tucmd_ipsec |= (SXEVF_IPSEC_PADLEN_OFFSET + pad_len); + } + } + + if (sa[vf_sa_idx].encrypt) { + tucmd_ipsec |= SXEVF_TX_CTXTD_TUCMD_IPSEC_ENCRYPT_EN; + } + + sxevf_ctxt_desc_sa_idx_set(ctxt_desc, vf_sa_idx); + + sxevf_ctxt_desc_tucmd_set(ctxt_desc, tucmd_ipsec); + +l_out: + return ret; +} + +void 
sxevf_rx_ipsec_proc(struct sxevf_ring *tx_ring, + union sxevf_rx_data_desc *desc, + struct sk_buff *skb) +{ + s32 ret = 0; + struct sxevf_adapter *adapter = netdev_priv(tx_ring->netdev); + struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + __le16 pkt_info = desc->wb.lower.lo_dword.hs_rss.pkt_info; + struct iphdr *ip4_hdr = NULL; + struct ipv6hdr *ip6_hdr = NULL; + void *daddr = NULL; + unsigned long daddr_len; + u8 *sec_hdr = NULL; + struct xfrm_state *xs = NULL; + struct xfrm_offload *offload = NULL; + __be32 spi; + u8 proto; + + if (!sxevf_status_err_check(desc, SXEVF_RXD_STAT_SECP)) { + LOG_DEBUG("not security packet, no need parse " + "security header.\n"); + goto l_out; + } + + if (pkt_info & cpu_to_le16(SXEVF_RXDADV_PKTTYPE_IPV4)) { + ip4_hdr = (struct iphdr *)(skb->data + ETH_HLEN); + daddr = &ip4_hdr->daddr; + daddr_len = sizeof(ip4_hdr->daddr); + sec_hdr = (u8 *)ip4_hdr + ip4_hdr->ihl * SXEVF_IP_HEAD_LEN_UNIT; + } else if (pkt_info & cpu_to_le16(SXEVF_RXDADV_PKTTYPE_IPV6)) { + ip6_hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN); + daddr = &ip6_hdr->daddr; + daddr_len = sizeof(ip6_hdr->daddr); + sec_hdr = (u8 *)ip6_hdr + sizeof(struct ipv6hdr); + } else { + ret = -SXEVF_ERR_DEVICE_NOT_SUPPORTED; + LOG_ERROR("sxe security not support L3 protocol:0x%x.(err:%d)\n", + desc->wb.lower.lo_dword.hs_rss.pkt_info, ret); + goto l_out; + }; + + if (pkt_info & cpu_to_le16(SXEVF_RXDADV_PKTTYPE_IPSEC_ESP)) { + spi = ((struct ip_esp_hdr *)sec_hdr)->spi; + proto = IPPROTO_ESP; + } else if (pkt_info & cpu_to_le16(SXEVF_RXDADV_PKTTYPE_IPSEC_AH)) { + spi = ((struct ip_auth_hdr *)sec_hdr)->spi; + proto = IPPROTO_AH; + } else { + ret = -SXEVF_ERR_DEVICE_NOT_SUPPORTED; + LOG_ERROR("sxe security not support security protocol:0x%x.(err:%d)\n", + desc->wb.lower.lo_dword.hs_rss.pkt_info, ret); + goto l_out; + } + + xs = sxevf_ipsec_rx_sa_match(ipsec, spi, proto, daddr, *(u8 *)&daddr_len); + if (!xs) { + ret = -SXEVF_ERR_IPSEC_SA_STATE_NOT_EXSIT; + LOG_ERROR("spi:0x%x, proto:0x%x daddr:%pI6 daddr_len:%lu" + "not matched sw rx sa entry.(err:%d)", + spi, proto, daddr, daddr_len, ret); + goto l_out; + } + + skb->sp = secpath_dup(skb->sp); + if (unlikely(!skb->sp)) { + LOG_INFO("skb security path null.\n"); + goto l_out; + } + + skb->sp->xvec[skb->sp->len++] = xs; + skb->sp->olen++; + + offload = xfrm_offload(skb); + offload->flags = CRYPTO_DONE; + offload->status = CRYPTO_SUCCESS; + + ipsec->rx_ipsec++; + +l_out: + return; +} + +static bool sxevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + bool ret = true; + + if (xs->props.family == AF_INET) { + if (ip_hdr(skb)->ihl != 5) { + LOG_ERROR("sxe ipsec offload unsupport ipv4 " + "header with option, hdr len:%d.\n", + ip_hdr(skb)->ihl); + ret = false; + } + } else { + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) { + LOG_ERROR("sxe ipsec offload unsupport ipv6 " + "header with ext hdr\n"); + ret = false; + } + } + + return ret; +} + +static s32 sxevf_ipsec_param_valid(struct xfrm_state *xs) +{ + s32 ret = -EINVAL; + + if ((xs->id.proto != IPPROTO_ESP) && + (xs->id.proto != IPPROTO_AH)) { + LOG_DEV_ERR("flags:%u offload:0x%lx unsupport " + "security protol:0x%x.\n", + xs->xso.flags, + xs->xso.offload_handle, + xs->id.proto); + goto l_out; + } + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + if (xs->calg) { + LOG_DEV_ERR("proto:%u flags:%u offload:0x%lx unsupport " + "compression offload\n", + xs->id.proto, + xs->xso.flags, + xs->xso.offload_handle); + goto l_out; + } + } + + ret = 0; + + LOG_INFO("proto:%u flags:%u offload:0x%lx ipsec param 
valid pass\n", + xs->id.proto, + xs->xso.flags, + xs->xso.offload_handle); + +l_out: + return ret; +} + +static s32 sxevf_ipsec_sa_idx_get(struct sxevf_ipsec_context *ipsec, bool is_rx) +{ + s32 ret = -ENOSPC; + u16 i; + + if (is_rx) { + if (ipsec->rx_sa_cnt == SXEVF_IPSEC_SA_CNT_MAX) { + LOG_ERROR("ipsec rx sa cnt reach limit:%u.\n", + SXEVF_IPSEC_SA_CNT_MAX); + goto l_out; + } + + for (i = 0; i < SXEVF_IPSEC_SA_CNT_MAX; i++) { + if (!test_and_set_bit(SXEVF_IPSEC_SA_ENTRY_USED, + &ipsec->rx_table[i].status)) { + ret = i; + break; + } + } + } else { + if (ipsec->tx_sa_cnt == SXEVF_IPSEC_SA_CNT_MAX) { + LOG_ERROR("ipsec tx sa cnt reach limit:%u.\n", + SXEVF_IPSEC_SA_CNT_MAX); + goto l_out; + } + + for (i = 0; i < SXEVF_IPSEC_SA_CNT_MAX; i++) { + if (!test_and_set_bit(SXEVF_IPSEC_SA_ENTRY_USED, + &ipsec->tx_table[i].status)) { + ret = i; + break; + } + } + } + +l_out: + return ret; +} + +static s32 sxevf_ipsec_key_salt_parse(struct xfrm_state *xs, + u32 *key, u32 *salt) +{ + s32 ret = 0; + s8 *xs_key; + unsigned long len; + + if (!xs->aead) { + ret = -EINVAL; + LOG_DEV_ERR("ipsec offload algorithm unsupport.(err:%d)\n", ret); + goto l_out; + } + + if (xs->aead->alg_icv_len != SXEVF_IPSEC_AUTH_BIT_LEN) { + ret = -EINVAL; + LOG_DEV_ERR("ipsec offload icv len:%u " + "unsupport.(err:%d)\n", + xs->aead->alg_icv_len, ret); + goto l_out; + } + + if (strcmp(xs->aead->alg_name, ipsec_aes_name)) { + ret = -EINVAL; + LOG_DEV_ERR("unsupport alg name:%s, just support alg:%s.(err:%d)\n", + xs->aead->alg_name, ipsec_aes_name, ret); + goto l_out; + } + + xs_key = xs->aead->alg_key; + len = xs->aead->alg_key_len; + + if (len == SXEVF_IPSEC_KEY_SALT_BIT_LEN) { + *salt = *(u32 *)(xs_key + SXEVF_IPSEC_KEY_BYTE_LEN); + } else if (len == SXEVF_IPSEC_KEY_BIT_LEN) { + *salt = 0; + } else { + ret = -EINVAL; + LOG_DEV_ERR("unsupport key_salt len:%lu.(err:%d)\n", len, ret); + goto l_out; + } + + memcpy(key, xs_key, sizeof(u32) * SXEVF_IPSEC_KEY_LEN); + + LOG_INFO("ipsec offload flag:0x%x key_salt len:%lu " + "salt:%u key:0x%x%x%x%x.\n", + xs->xso.flags ,len, + *salt, key[0], key[1], key[2], key[3]); + +l_out: + return ret; +} + +static s32 sxevf_ipsec_rx_sa_entry_fill(struct xfrm_state *xs, + struct sxevf_rx_sa *sa_entry) +{ + s32 ret; + + memset(sa_entry, 0, sizeof(*sa_entry)); + + sa_entry->xs = xs; + + if (xs->id.proto & IPPROTO_ESP) { + sa_entry->decrypt = !!((xs->ealg) || (xs->aead)); + } + + ret = sxevf_ipsec_key_salt_parse(xs, sa_entry->key, &sa_entry->salt); + if (ret) { + LOG_DEV_ERR("ipsec offload key salt param parse fail.(err:%d)\n", + ret); + goto l_out; + } + + if (xs->props.family == AF_INET6) { + memcpy(sa_entry->ip_addr, &xs->id.daddr.a6, SXEVF_IPV6_ADDR_SIZE); + } else { + memcpy(&sa_entry->ip_addr[SXEVF_IPV4_ADDR_SIZE - 1], + &xs->id.daddr.a4, + SXEVF_IPV4_ADDR_SIZE); + } + + sa_entry->mode = SXEVF_IPSEC_RXMOD_VALID; + if (sa_entry->xs->id.proto & IPPROTO_ESP) { + sa_entry->mode |= SXEVF_IPSEC_RXMOD_PROTO_ESP; + } + + if (sa_entry->decrypt) { + sa_entry->mode |= SXEVF_IPSEC_RXMOD_DECRYPT; + } + + if (sa_entry->xs->props.family == AF_INET6) { + sa_entry->mode |= SXEVF_IPSEC_RXMOD_IPV6; + } + +l_out: + return ret; +} + +static s32 sxevf_ipsec_sa_add_to_pf(struct sxevf_adapter *adapter, + struct xfrm_state *xs, u32 *pf_sa_idx) +{ + s32 ret; + + spin_lock_bh(&adapter->mbx_lock); + ret = sxevf_ipsec_sa_add(adapter, xs, pf_sa_idx); + spin_unlock_bh(&adapter->mbx_lock); + if (ret) { + LOG_ERROR("xfrm state flags:0x%x spi:0x%x proto:0x%x " + "family:0x%x add to pf fail.(err:%d)\n", + xs->xso.flags, 
xs->id.spi, + xs->id.proto, xs->props.family, ret); + } + + return ret; +} + +static s32 sxevf_ipsec_rx_xs_add(struct sxevf_adapter *adapter, + struct xfrm_state *xs) +{ + struct sxevf_rx_sa sa_entry; + struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + u32 vf_sa_idx; + s32 ret; + u32 pf_sa_idx; + + ret = sxevf_ipsec_sa_idx_get(ipsec, true); + if (ret < 0) { + LOG_DEV_ERR("rx_sa_cnt:%d rx sa table no space.(err:%d)\n", + ipsec->rx_sa_cnt, ret); + goto l_out; + } + + vf_sa_idx = (u32)ret; + + sa_entry.status = ipsec->rx_table[vf_sa_idx].status; + + ret = sxevf_ipsec_rx_sa_entry_fill(xs, &sa_entry); + if (ret) { + LOG_ERROR("ipsec offload param parse fail.(err:%d)\n", ret); + goto clear_used_xs; + } + + ret = sxevf_ipsec_sa_add_to_pf(adapter, xs, &pf_sa_idx); + if (ret) { + LOG_ERROR("xfrm state flags:0x%x spi:0x%x proto:0x%x " + "family:0x%x add to pf fail.(err:%d)\n", + xs->xso.flags, xs->id.spi, + xs->id.proto, xs->props.family, ret); + goto clear_used_xs; + } + + memcpy(&ipsec->rx_table[vf_sa_idx], &sa_entry, sizeof(sa_entry)); + sa_entry.pf_sa_idx = pf_sa_idx; + xs->xso.offload_handle = vf_sa_idx + SXEVF_IPSEC_RX_INDEX_BASE; + + ipsec->rx_sa_cnt++; + + LOG_INFO("tx_sa_table[%u] add done pf_sa_idx:%u rx_sa_cnt:%u.\n", + vf_sa_idx, pf_sa_idx, ipsec->rx_sa_cnt); + +l_out: + return ret; + +clear_used_xs: + clear_bit(SXEVF_IPSEC_SA_ENTRY_USED, &ipsec->rx_table[vf_sa_idx].status); + + return ret; +} + +static s32 sxevf_ipsec_tx_xs_add(struct sxevf_adapter *adapter, + struct xfrm_state *xs) +{ + struct sxevf_tx_sa sa_entry; + struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + u32 vf_sa_idx; + s32 ret; + u32 pf_sa_idx; + + ret = sxevf_ipsec_sa_idx_get(ipsec, false); + if (ret < 0) { + LOG_DEV_ERR("tx_sa_cnt:%d tx sa table no space.(err:%d)\n", + ipsec->tx_sa_cnt, ret); + goto l_out; + } + + vf_sa_idx = (u32)ret; + + memset(&sa_entry, 0, sizeof(struct sxevf_tx_sa)); + + sa_entry.xs = xs; + sa_entry.status = ipsec->tx_table[vf_sa_idx].status; + + if (xs->id.proto & IPPROTO_ESP) { + sa_entry.encrypt = !!((xs->ealg) || (xs->aead)); + } + + ret = sxevf_ipsec_key_salt_parse(xs, sa_entry.key, &sa_entry.salt); + if (ret) { + LOG_DEV_ERR("ipsec offload key salt param parse fail.(err:%d)\n", + ret); + goto clear_used_xs; + } + + ret = sxevf_ipsec_sa_add_to_pf(adapter, xs, &pf_sa_idx); + if (ret) { + LOG_ERROR("xfrm state flags:0x%x spi:0x%x proto:0x%x " + "family:0x%x add to pf fail.(err:%d)\n", + xs->xso.flags, xs->id.spi, + xs->id.proto, xs->props.family, ret); + goto clear_used_xs; + } + + memcpy(&ipsec->tx_table[vf_sa_idx], &sa_entry, sizeof(sa_entry)); + sa_entry.pf_sa_idx = pf_sa_idx; + xs->xso.offload_handle = vf_sa_idx + SXEVF_IPSEC_TX_INDEX_BASE; + + ipsec->tx_sa_cnt++; + + LOG_INFO("tx_sa_table[%u] add done pf_sa_idx:%u tx_sa_cnt:%u.\n", + vf_sa_idx, pf_sa_idx, ipsec->tx_sa_cnt); + +l_out: + return ret; + +clear_used_xs: + clear_bit(SXEVF_IPSEC_SA_ENTRY_USED, &ipsec->tx_table[vf_sa_idx].status); + + return ret; +} + +static s32 sxevf_ipsec_state_add(struct xfrm_state *xs) +{ + s32 ret; + struct net_device *net_dev = xs->xso.dev; + struct sxevf_adapter *adapter = netdev_priv(net_dev); + + ret = sxevf_ipsec_param_valid(xs); + if (ret) { + LOG_ERROR("ipsec offload param invalid.(err:%d)\n", + ret); + goto l_out; + } + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + ret = sxevf_ipsec_rx_xs_add(adapter, xs); + } else { + ret = sxevf_ipsec_tx_xs_add(adapter, xs); + } + + if (ret) { + LOG_ERROR("offload_handle:%lu flag:0x%x sa add fail.(err:%d)\n", + xs->xso.offload_handle, + 
xs->xso.flags, ret); + goto l_out; + } + +l_out: + return ret; +} + +static void sxevf_ipsec_state_delete(struct xfrm_state *xs) +{ + struct net_device *netdev = xs->xso.dev; + struct sxevf_adapter *adapter = netdev_priv(netdev); + struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + u32 vf_sa_idx; + u32 pf_sa_idx; + s32 ret; + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + vf_sa_idx = xs->xso.offload_handle - SXEVF_IPSEC_RX_INDEX_BASE; + pf_sa_idx = ipsec->rx_table[vf_sa_idx].pf_sa_idx; + + if (!test_bit(SXEVF_IPSEC_SA_ENTRY_USED, + &ipsec->rx_table[vf_sa_idx].status)) { + LOG_DEV_ERR("vf_sa_idx:%d not in used, offload_handle: %lu.\n", + vf_sa_idx, xs->xso.offload_handle); + goto l_end; + } + + spin_lock_bh(&adapter->mbx_lock); + ret = sxevf_ipsec_sa_del(adapter, pf_sa_idx); + spin_unlock_bh(&adapter->mbx_lock); + if (ret) { + LOG_ERROR("vf_sa_idx:%u pf_sa_idx:0x%x " + "flags:0x%x del fail.(err:%d)\n", + vf_sa_idx, + pf_sa_idx, + xs->xso.flags, ret); + goto l_end; + } + + hash_del_rcu(&ipsec->rx_table[vf_sa_idx].hlist); + memset(&ipsec->rx_table[vf_sa_idx], 0, sizeof(struct sxevf_rx_sa)); + ipsec->rx_sa_cnt--; + } else { + vf_sa_idx = xs->xso.offload_handle - SXEVF_IPSEC_TX_INDEX_BASE; + pf_sa_idx = ipsec->tx_table[vf_sa_idx].pf_sa_idx; + if (!test_bit(SXEVF_IPSEC_SA_ENTRY_USED, + &ipsec->tx_table[vf_sa_idx].status)) { + LOG_DEV_ERR("vf_sa_idx:%d not in used, offload_handle: %lu.\n", + vf_sa_idx, xs->xso.offload_handle); + goto l_end; + } + + spin_lock_bh(&adapter->mbx_lock); + ret = sxevf_ipsec_sa_del(adapter, pf_sa_idx); + spin_unlock_bh(&adapter->mbx_lock); + if (ret) { + LOG_ERROR("vf_sa_idx:%u pf_sa_idx:0x%x " + "flags:0x%x del fail.(err:%d)\n", + vf_sa_idx, + pf_sa_idx, + xs->xso.flags, ret); + goto l_end; + } + + memset(&ipsec->tx_table[vf_sa_idx], 0, sizeof(struct sxevf_tx_sa)); + ipsec->tx_sa_cnt--; + } + + LOG_ERROR("vf_sa_idx:%u pf_sa_idx:0x%x flags:0x%x del done.\n", + vf_sa_idx, + pf_sa_idx, + xs->xso.flags); + +l_end: + return; +} + +static const struct xfrmdev_ops sxevf_xfrmdev_ops = { + .xdo_dev_offload_ok = sxevf_ipsec_offload_ok, + .xdo_dev_state_add = sxevf_ipsec_state_add, + .xdo_dev_state_delete = sxevf_ipsec_state_delete, +}; + +void sxevf_ipsec_offload_init(struct sxevf_adapter *adapter) +{ + struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + u32 size; + + hash_init(ipsec->rx_table_list); + + size = sizeof(struct sxevf_rx_sa) * SXEVF_IPSEC_SA_CNT_MAX; + ipsec->rx_table = kzalloc(size, GFP_KERNEL); + if (ipsec->rx_table == NULL) { + LOG_DEV_ERR("ipsec rx sa table mem:%uB alloc fail.\n", size); + goto l_out; + } + + size = sizeof(struct sxevf_tx_sa) * SXEVF_IPSEC_SA_CNT_MAX; + ipsec->tx_table = kzalloc(size, GFP_KERNEL); + if (ipsec->tx_table == NULL) { + LOG_DEV_ERR("ipsec tx sa table mem:%uB alloc fail.\n", size); + goto l_free_rx_table; + } + + ipsec->rx_sa_cnt = 0; + ipsec->tx_sa_cnt = 0; + + adapter->netdev->xfrmdev_ops = &sxevf_xfrmdev_ops; + + LOG_INFO("ipsec init done.\n"); + +l_out: + return; + +l_free_rx_table: + SXEVF_KFREE(ipsec->rx_table); + return; +} + +void sxevf_ipsec_offload_exit(struct sxevf_adapter *adapter) +{ + struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + + SXEVF_KFREE(ipsec->rx_table); + SXEVF_KFREE(ipsec->tx_table); + + LOG_INFO("ipsec exit done.\n"); + + return; +} + +void sxevf_ipsec_restore(struct sxevf_adapter *adapter) +{ + struct sxevf_ipsec_context *ipsec = &adapter->ipsec_ctxt; + struct sxevf_rx_sa *rx_sa; + struct sxevf_tx_sa *tx_sa; + u16 i; + s32 ret; + + if (!(adapter->netdev->features & 
NETIF_F_HW_ESP)) { + goto l_end; + } + + for (i = 0; i < SXEVF_IPSEC_SA_CNT_MAX; i++) { + rx_sa = &ipsec->rx_table[i]; + tx_sa = &ipsec->tx_table[i]; + + if (test_bit(SXEVF_IPSEC_SA_ENTRY_USED, &rx_sa->status)) { + ret = sxevf_ipsec_sa_add_to_pf(adapter, rx_sa->xs, NULL); + if (ret) { + LOG_DEV_ERR("rx xfrm state flags:0x%x spi:0x%x proto:0x%x " + "family:0x%x add to pf fail.(err:%d)\n", + rx_sa->xs->xso.flags, + rx_sa->xs->id.spi, + rx_sa->xs->id.proto, + rx_sa->xs->props.family, ret); + } + } + + if (test_bit(SXEVF_IPSEC_SA_ENTRY_USED, &tx_sa->status)) { + ret = sxevf_ipsec_sa_add_to_pf(adapter, tx_sa->xs, NULL); + if (ret) { + LOG_DEV_ERR("tx xfrm state flags:0x%x spi:0x%x proto:0x%x " + "family:0x%x add to pf fail.(err:%d)\n", + tx_sa->xs->xso.flags, + tx_sa->xs->id.spi, + tx_sa->xs->id.proto, + tx_sa->xs->props.family, ret); + } + } + + } + +l_end: + return; +} + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ipsec.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ipsec.h new file mode 100644 index 000000000000..75a532db2e8b --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ipsec.h @@ -0,0 +1,91 @@ + +#ifndef __SXEVF_IPSEC_H__ +#define __SXEVF_IPSEC_H__ + +#include "sxevf_ring.h" + +struct sxevf_adapter; + +#define SXEVF_IPSEC_SA_CNT_MAX (1024) + +#define SXEVF_IPSEC_RX_INDEX_BASE (0) +#define SXEVF_IPSEC_TX_INDEX_BASE (SXEVF_IPSEC_SA_CNT_MAX) + +#define SXEVF_IPSEC_AUTH_BIT_LEN (128) +#define SXEVF_IPSEC_SA_ENTRY_USED (0x1) + +#define SXEVF_IPSEC_PADLEN_OFFSET \ + ((SXEVF_IPSEC_AUTH_BIT_LEN / 8) + 2) +#define SXEVF_IPSEC_PADLEN_BYTE (1) + +#define SXEVF_IPSEC_IP_LEN (4) +#define SXEVF_IPSEC_KEY_LEN (4) +#define SXEVF_IPSEC_KEY_SALT_BIT_LEN (160) +#define SXEVF_IPSEC_KEY_BIT_LEN (128) +#define SXEVF_IPSEC_KEY_SALT_BYTE_LEN (SXEVF_IPSEC_KEY_SALT_BIT_LEN / 8) +#define SXEVF_IPSEC_KEY_BYTE_LEN (SXEVF_IPSEC_KEY_BIT_LEN / 8) + +#define SXEVF_IPV4_ADDR_SIZE (4) +#define SXEVF_IPV6_ADDR_SIZE (16) + +#define SXEVF_IPSEC_RXMOD_VALID 0x00000001 +#define SXEVF_IPSEC_RXMOD_PROTO_ESP 0x00000004 +#define SXEVF_IPSEC_RXMOD_DECRYPT 0x00000008 +#define SXEVF_IPSEC_RXMOD_IPV6 0x00000010 +#define SXEVF_IPSEC_RXTXMOD_VF 0x00000020 + +#define SXEVF_ESP_FEATURES (NETIF_F_HW_ESP | \ + NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) + +struct sxevf_tx_sa { + struct xfrm_state *xs; + u32 key[SXEVF_IPSEC_KEY_LEN]; + u32 salt; + u32 mode; + bool encrypt; + u32 pf_sa_idx; + unsigned long status; +}; + +struct sxevf_rx_sa { + struct hlist_node hlist; + struct xfrm_state *xs; + + u32 key[SXEVF_IPSEC_KEY_LEN]; + u32 salt; + __be32 ip_addr[SXEVF_IPSEC_IP_LEN]; + u32 mode; + + u32 pf_sa_idx; + bool decrypt; + unsigned long status; +}; + +struct sxevf_ipsec_context { + u16 rx_sa_cnt; + u16 tx_sa_cnt; + u64 rx_ipsec; + + struct sxevf_rx_sa *rx_table; + + struct sxevf_tx_sa *tx_table; + + DECLARE_HASHTABLE(rx_table_list, 10); +}; + +void sxevf_ipsec_offload_init(struct sxevf_adapter *adapter); + +void sxevf_ipsec_offload_exit(struct sxevf_adapter *adapter); + +void sxevf_rx_ipsec_proc(struct sxevf_ring *tx_ring, + union sxevf_rx_data_desc *desc, + struct sk_buff *skb); + +s32 sxevf_tx_ipsec_offload(struct sxevf_ring *tx_ring, + struct sxevf_tx_buffer *first, + struct sxevf_tx_context_desc *ctxt_desc); + +void sxevf_ipsec_restore(struct sxevf_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_irq.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_irq.c new file mode 100644 index 000000000000..e872d36bce9c --- /dev/null +++ 
b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_irq.c @@ -0,0 +1,942 @@ + +#include +#include +#include +#include + +#include "sxevf.h" +#ifdef HAVE_NO_OVERFLOW_H +#include +#else +#include +#endif +#include "sxevf_irq.h" +#include "sxe_log.h" +#include "sxevf_monitor.h" +#include "sxevf_rx_proc.h" +#include "sxevf_tx_proc.h" +#include "sxevf_netdev.h" + +#ifdef NETIF_NAPI_ADD_API_NEED_3_PARAMS +static inline void +netif_napi_add_compat(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int weight) +{ + netif_napi_add(dev, napi, poll); +} + +#define netif_napi_add(dev, napi, poll, weight) netif_napi_add_compat(dev, napi, poll, weight) +#endif + +s32 sxevf_irq_coalesce_get(struct net_device * netdev, + struct ethtool_coalesce *user) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + u16 rx_itr = adapter->irq_ctxt.rx_irq_interval; + u16 tx_itr = adapter->irq_ctxt.tx_irq_interval; + struct sxevf_irq_data *irq_data = adapter->irq_ctxt.irq_data[0]; + bool is_mixed; + s32 ret = 0; + + + if (irq_data->tx.list.cnt && irq_data->rx.list.cnt) { + is_mixed = true; + } else { + is_mixed = false; + } + + if (rx_itr == SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE) { + user->rx_coalesce_usecs = SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE; + } else { + user->rx_coalesce_usecs = rx_itr >> SXEVF_EITR_ITR_SHIFT; + } + + if (is_mixed) { + LOG_INFO_BDF("interrupt 0 has both rx and tx ring, " + "just report rx itr:%u.\n", + user->rx_coalesce_usecs); + goto l_out; + } + + if (tx_itr == SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE) { + user->tx_coalesce_usecs = SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE; + } else { + user->tx_coalesce_usecs = tx_itr >> SXEVF_EITR_ITR_SHIFT; + } + + LOG_INFO_BDF("rx irq interval:%u tx irq interval:%u.\n", + rx_itr, tx_itr); + +l_out: + return ret; +} + +s32 sxevf_irq_coalesce_set(struct net_device *netdev, struct ethtool_coalesce *user) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + struct sxevf_hw *hw = &adapter->hw; + struct sxevf_irq_data *irq_data = adapter->irq_ctxt.irq_data[0]; + u16 tx_itr, tx_itr_old; + u16 rx_itr; + u8 i; + bool is_mixed; + bool need_rst = false; + u32 itr_max = SXEVF_EITR_ITR_MAX; + s32 ret = 0; + + if ((user->rx_coalesce_usecs > itr_max) || + (user->tx_coalesce_usecs > itr_max)) { + ret = -EINVAL; + LOG_ERROR_BDF("user param invalid, rx_coalesce_usecs:%u" + "tx_coalesce_usecs:%u max:%u.(err:%d)\n", + user->rx_coalesce_usecs, + user->tx_coalesce_usecs, + itr_max, ret); + goto l_out; + } + + if (irq_data->tx.list.cnt && irq_data->rx.list.cnt) { + is_mixed = true; + } else { + is_mixed = false; + } + + if(is_mixed) { + if (user->tx_coalesce_usecs) { + ret = -EINVAL; + LOG_ERROR_BDF("irq both has rx and rx ring, rx_coalesce_usecs:%u" + "tx_coalesce_usecs:%u invalid.(err:%d)\n", + user->rx_coalesce_usecs, + user->tx_coalesce_usecs, + ret); + goto l_out; + } + tx_itr_old = adapter->irq_ctxt.rx_irq_interval; + } else { + tx_itr_old = adapter->irq_ctxt.rx_irq_interval; + } + + if (user->rx_coalesce_usecs > SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE) { + adapter->irq_ctxt.rx_irq_interval = user->rx_coalesce_usecs << + SXEVF_EITR_ITR_SHIFT; + } else { + adapter->irq_ctxt.rx_irq_interval = user->rx_coalesce_usecs; + } + + if (adapter->irq_ctxt.rx_irq_interval == SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE) { + rx_itr = SXEVF_IRQ_INTERVAL_20K; + } else { + rx_itr = adapter->irq_ctxt.rx_irq_interval; + } + + if (user->tx_coalesce_usecs > SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE) { + adapter->irq_ctxt.tx_irq_interval = user->tx_coalesce_usecs << + SXEVF_EITR_ITR_SHIFT; 
+ } else { + adapter->irq_ctxt.tx_irq_interval = user->tx_coalesce_usecs; + } + + if (adapter->irq_ctxt.tx_irq_interval == SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE) { + tx_itr = SXEVF_IRQ_INTERVAL_12K; + } else { + tx_itr = adapter->irq_ctxt.tx_irq_interval; + } + + if (is_mixed) { + adapter->irq_ctxt.tx_irq_interval = adapter->irq_ctxt.rx_irq_interval; + } + + if (!!adapter->irq_ctxt.tx_irq_interval != !!tx_itr_old) { + need_rst = true; + } + + for (i = 0; i < adapter->irq_ctxt.ring_irq_num; i++) { + irq_data = adapter->irq_ctxt.irq_data[i]; + if (irq_data->tx.list.cnt && !irq_data->rx.list.cnt) { + irq_data->irq_interval = tx_itr; + } else { + irq_data->irq_interval = rx_itr; + } + + hw->irq.ops->ring_irq_interval_set(hw, i, irq_data->irq_interval); + } + + if (need_rst) { + if (netif_running(netdev)) { + sxevf_hw_reinit(adapter); + } else { + sxevf_reset(adapter); + } + } + + LOG_INFO_BDF("user tx_coalesce_usecs:%u rx_coalesce_usecs:%u " + "adapter tx_irq_interval:%u rx_irq_interval:%u " + "tx_itr:%u rx_itr:%u need_rst:%u is_misxed:%u.\n", + user->tx_coalesce_usecs, + user->rx_coalesce_usecs, + adapter->irq_ctxt.tx_irq_interval, + adapter->irq_ctxt.rx_irq_interval, + tx_itr, + rx_itr, + need_rst, + is_mixed); + +l_out: + return ret; +} + +static void sxevf_irq_num_init(struct sxevf_adapter *adapter) +{ + u16 ring_irq; + u16 cpu_cnt = num_online_cpus(); + + ring_irq = max(adapter->rx_ring_ctxt.num, adapter->tx_ring_ctxt.num); + + ring_irq = min_t(u16, ring_irq, cpu_cnt); + + adapter->irq_ctxt.total_irq_num = ring_irq + SXEVF_NON_QUEUE_IRQ_NUM; + + adapter->irq_ctxt.ring_irq_num = ring_irq; + + LOG_INFO_BDF("msi-x interrupt rxr_num:%u txr_num:%u " + "xdp_num:%u cpu cnt:%u " + "total_irq_num:%u ring_irq_num:%u\n", + adapter->rx_ring_ctxt.num, + adapter->tx_ring_ctxt.num, + adapter->xdp_ring_ctxt.num, + cpu_cnt, + adapter->irq_ctxt.total_irq_num, + adapter->irq_ctxt.ring_irq_num); + + return; +} + +static s32 sxevf_msix_irq_init(struct sxevf_adapter *adapter) +{ + u16 i; + s32 ret; + u16 total = adapter->irq_ctxt.total_irq_num; + + adapter->irq_ctxt.msix_entries = kcalloc(total, + sizeof(struct msix_entry), + GFP_KERNEL); + if (adapter->irq_ctxt.msix_entries == NULL) { + ret = -ENOMEM; + LOG_ERROR_BDF("msi-x irq entry num:%u per size:%lu kcalloc fail." 
+ "(err:%d)\n", + total, sizeof(struct msix_entry), ret); + goto l_out; + } + + for (i = 0; i < total; i++) { + adapter->irq_ctxt.msix_entries[i].entry = i; + } + + ret = pci_enable_msix_range(adapter->pdev, + adapter->irq_ctxt.msix_entries, + SXEVF_MIN_MSIX_IRQ_NUM, total); + if (ret < 0) { + SXEVF_KFREE(adapter->irq_ctxt.msix_entries); + + LOG_DEV_ERR("min:%u max:%u pci enable msi-x failed.(err:%d)\n", + SXEVF_MIN_MSIX_IRQ_NUM, total, ret); + } else { + if (ret != total) { + adapter->irq_ctxt.total_irq_num = ret; + adapter->irq_ctxt.ring_irq_num = ret - + SXEVF_NON_QUEUE_IRQ_NUM; + } + LOG_INFO_BDF("enable pci msi-x success.result:%d maxCnt:%u" + " total irq num:%u ring irq num:%u\n", + ret, total, + adapter->irq_ctxt.total_irq_num, + adapter->irq_ctxt.ring_irq_num); + + ret = 0; + } + +l_out: + return ret; +} + +static void sxevf_irq_data_free(struct sxevf_adapter *adapter, + u16 irq_idx) +{ + u16 idx; + struct sxevf_irq_data *irq_data = adapter->irq_ctxt.irq_data[irq_idx]; + struct sxevf_ring *ring; + + sxevf_for_each_ring(ring, irq_data->tx.list) { + adapter->tx_ring_ctxt.ring[ring->idx] = NULL; + } + + if (irq_data->tx.xdp_ring) { + idx = irq_data->tx.xdp_ring->idx; + adapter->xdp_ring_ctxt.ring[idx] = NULL; + } + + sxevf_for_each_ring(ring, irq_data->rx.list) { + adapter->rx_ring_ctxt.ring[ring->idx] = NULL; + } + + adapter->irq_ctxt.irq_data[irq_idx] = NULL; + + netif_napi_del(&irq_data->napi); + kfree_rcu(irq_data, rcu); + + return ; +} + +static void sxevf_all_irq_data_free(struct sxevf_adapter *adapter) +{ + u16 irq_idx = adapter->irq_ctxt.ring_irq_num; + + while (irq_idx--) { + sxevf_irq_data_free(adapter, irq_idx); + } + + return; +} + +static s32 sxevf_irq_data_alloc(struct sxevf_adapter *adapter, + u16 total_count, u16 irq_idx) +{ + s32 ret = 0; + struct sxevf_irq_data *irq_data; + + irq_data = kzalloc(struct_size(irq_data, ring, total_count), + GFP_KERNEL); + if (irq_data == NULL) { + ret = -ENOMEM; + LOG_ERROR_BDF("alloc interrupt data and ring resource " + "failed. 
size: %ld irq_idx:%u " + "ring count:%u.(err:%d)\n", + struct_size(irq_data, ring, total_count), + irq_idx, total_count, ret); + goto l_out; + } + + netif_napi_add(adapter->netdev, &irq_data->napi, sxevf_poll, + SXEVF_NAPI_WEIGHT); + + adapter->irq_ctxt.irq_data[irq_idx] = irq_data; + irq_data->adapter = adapter; + irq_data->irq_idx = irq_idx; + +l_out: + return ret; +} + +static void sxevf_irq_interval_init(struct sxevf_irq_context *irq_ctxt, + u16 irq_idx, u16 txr_cnt, u16 rxr_cnt) +{ + struct sxevf_irq_data *irq_data = irq_ctxt->irq_data[irq_idx]; + + if (txr_cnt && !rxr_cnt) { + if (irq_ctxt->tx_irq_interval == SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE) { + irq_data->irq_interval = SXEVF_IRQ_INTERVAL_12K; + } else { + irq_data->irq_interval = irq_ctxt->tx_irq_interval; + } + } else { + if (irq_ctxt->rx_irq_interval == SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE) { + irq_data->irq_interval = SXEVF_IRQ_INTERVAL_20K; + } else { + irq_data->irq_interval = irq_ctxt->rx_irq_interval; + } + } + + irq_data->tx.irq_rate.irq_interval = SXEVF_LOWEST_LATENCY; + irq_data->rx.irq_rate.irq_interval = SXEVF_LOWEST_LATENCY; + + LOG_INFO("irq_idx:%u irq level interval:%u " + "list level rx irq interval:%u tx irq interval:%u\n", + irq_idx, irq_data->irq_interval, + irq_data->rx.irq_rate.irq_interval, + irq_data->tx.irq_rate.irq_interval); + + return; +} + +static s32 sxevf_irq_ring_bind(struct sxevf_adapter *adapter) +{ + s32 ret = 0; + u16 rxr_idx = 0; + u16 txr_idx = 0; + u16 xdp_idx = 0; + u16 irq_idx = 0; + u16 irq_num = adapter->irq_ctxt.ring_irq_num; + u16 rxr_remain = adapter->rx_ring_ctxt.num; + u16 txr_remain = adapter->tx_ring_ctxt.num; + u16 xdp_remain = adapter->xdp_ring_ctxt.num; + u16 total_ring = rxr_remain + txr_remain + xdp_remain; + + if (irq_num >= total_ring) { + for (; rxr_remain > 0; irq_idx++, irq_num--) { + u16 rxr_cnt = DIV_ROUND_UP(txr_remain, irq_num); + + ret = sxevf_irq_data_alloc(adapter, rxr_cnt, irq_idx); + if (ret) { + LOG_ERROR_BDF("irq_num:%u rxr_remain:%u " + "txr_remain:%u xdp_remain:%u " + "irq_idx:%u alloc rx irq " + "resource priority fail.(err:%d)\n", + irq_num, rxr_remain, + txr_remain, xdp_remain, + irq_idx, ret); + goto l_error; + } + + sxevf_irq_interval_init(&adapter->irq_ctxt, irq_idx, 0, 1); + + sxevf_rx_ring_init(adapter, 0, 1, rxr_idx, + irq_idx, rxr_idx); + + rxr_remain -= rxr_cnt; + rxr_idx += rxr_cnt; + } + LOG_INFO_BDF("alloc rx irq resource priority done.irq_idx:%u " + "rxr_idx:%u txr_remain:%u rxr_remain:%u xdp_remain:%u" + " ring_irq_num:%u total_ring:%u \n", + irq_idx, rxr_idx, txr_remain, rxr_remain, + xdp_remain, irq_num, total_ring); + } + + for (; irq_num; irq_idx++, irq_num--) { + u16 txr_cnt = DIV_ROUND_UP(txr_remain, irq_num); + + u16 xdp_cnt = DIV_ROUND_UP(xdp_remain, irq_num); + + u16 rxr_cnt = DIV_ROUND_UP(rxr_remain, irq_num); + u16 tx_reg_idx = txr_idx + xdp_idx; + u16 xdp_reg_idx = txr_cnt ? 
(tx_reg_idx + 1) : tx_reg_idx; + + total_ring = txr_cnt + xdp_cnt + rxr_cnt; + + LOG_DEBUG_BDF("irq_num:%u irq_idx:%u txr_cnt:%u xdp_cnt:%u" + " rxr_cnt:%u base txr_idx:%u xdp_idx:%u" + " rxr_idx:%u \n", + irq_num, irq_idx, txr_cnt, xdp_cnt, rxr_cnt, + txr_idx, xdp_idx, rxr_idx); + + ret = sxevf_irq_data_alloc(adapter, total_ring, irq_idx); + if (ret) { + LOG_ERROR_BDF("irq_num:%u rxr_remain:%u txr_remain:%u " + "xdp_remain:%u rxr_cnt:%u txr_cnt:%u " + " xdp_cnt:%u ird_idx:%u alloc irq resource " + " fail.(err:%d)\n", + irq_num, rxr_remain, txr_remain, + xdp_remain, rxr_cnt, txr_cnt, + xdp_cnt, irq_idx, ret); + goto l_error; + } + + sxevf_irq_interval_init(&adapter->irq_ctxt, irq_idx, txr_cnt, + rxr_cnt); + + + sxevf_tx_ring_init(adapter, 0, txr_cnt, txr_idx, + irq_idx, tx_reg_idx); + + sxevf_xdp_ring_init(adapter, txr_cnt, xdp_cnt, + xdp_idx, irq_idx, xdp_reg_idx); + + sxevf_rx_ring_init(adapter, txr_cnt + xdp_cnt, rxr_cnt, + rxr_idx, irq_idx, rxr_idx); + + txr_remain -= txr_cnt; + xdp_remain -= xdp_cnt; + rxr_remain -= rxr_cnt; + + txr_idx += txr_cnt; + xdp_idx += xdp_cnt; + rxr_idx += rxr_cnt; + } + + return ret; + +l_error: + adapter->irq_ctxt.ring_irq_num = 0; + adapter->tx_ring_ctxt.num = 0; + adapter->rx_ring_ctxt.num = 0; + adapter->xdp_ring_ctxt.num = 0; + + while(irq_idx--) { + sxevf_irq_data_free(adapter, irq_idx); + } + + return ret; + +} + +static void sxevf_pci_irq_disable(struct sxevf_adapter *adapter) +{ + pci_disable_msix(adapter->pdev); + SXEVF_KFREE(adapter->irq_ctxt.msix_entries); + + return; +} + +void sxevf_hw_irq_disable(struct sxevf_adapter *adapter) +{ + u16 i; + struct sxevf_hw *hw = &adapter->hw; + struct sxevf_irq_context *irq = &adapter->irq_ctxt; + + hw->irq.ops->irq_disable(hw); + + for (i = 0; i < adapter->irq_ctxt.ring_irq_num; i++) { + synchronize_irq(irq->msix_entries[i].vector); + } + + synchronize_irq(irq->msix_entries[i].vector); + + return; +} + +void sxevf_irq_release(struct sxevf_adapter *adapter) +{ + u16 irq_idx; + struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt; + + if (!irq_ctxt->msix_entries) { + goto l_out; + } + + for (irq_idx = 0; irq_idx < irq_ctxt->ring_irq_num; irq_idx++) { + struct sxevf_irq_data *irq_data = irq_ctxt->irq_data[irq_idx]; + struct msix_entry *entry = &irq_ctxt->msix_entries[irq_idx]; + + if (!irq_data->rx.list.next && + !irq_data->tx.list.next && + !irq_data->tx.xdp_ring) { + continue; + } + + free_irq(entry->vector, irq_data); + } + + free_irq(irq_ctxt->msix_entries[irq_idx].vector, adapter); + +l_out: + return; +} + +s32 sxevf_irq_ctxt_init(struct sxevf_adapter *adapter) +{ + s32 ret; + + adapter->irq_ctxt.rx_irq_interval = SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE; + adapter->irq_ctxt.tx_irq_interval = SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE; + + sxevf_irq_num_init(adapter); + + ret = sxevf_msix_irq_init(adapter); + if (ret) { + LOG_DEV_ERR("msix irq init fail.(err:%d)\n", + ret); + goto l_out; + } + + ret = sxevf_irq_ring_bind(adapter); + if (ret) { + LOG_DEV_ERR("interrupt and ring bind fail.(err:%d)\n", ret); + goto l_disable_irq; + } + + LOG_INFO_BDF("adapter rx_irq_interval:%u tx_irq_interval:%u.\n", + adapter->irq_ctxt.rx_irq_interval, + adapter->irq_ctxt.tx_irq_interval); + +l_out: + return ret; + +l_disable_irq: + sxevf_pci_irq_disable(adapter); + return ret; + +} + +void sxevf_irq_ctxt_exit(struct sxevf_adapter *adapter) +{ + sxevf_all_irq_data_free(adapter); + + sxevf_pci_irq_disable(adapter); + + adapter->irq_ctxt.ring_irq_num = 0; + adapter->tx_ring_ctxt.num = 0; + adapter->rx_ring_ctxt.num = 0; + 
adapter->xdp_ring_ctxt.num = 0; + + return; +} + +static bool sxevf_set_irq_name(struct sxevf_irq_data *irq_data, + char *dev_name, + u16 *rx_idx, u16 *tx_idx) +{ + if (irq_data->tx.list.next && + irq_data->rx.list.next) { + snprintf(irq_data->name, sizeof(irq_data->name), + "%s-TxRx-%u", dev_name, (*rx_idx)++); + (*tx_idx)++; + } else if (irq_data->rx.list.next) { + snprintf(irq_data->name, sizeof(irq_data->name), + "%s-Rx-%u", dev_name, (*rx_idx)++); + } else if (irq_data->tx.list.next || + irq_data->tx.xdp_ring) { + snprintf(irq_data->name, sizeof(irq_data->name), + "%s-Tx-%u", dev_name, (*tx_idx)++); + } else { + LOG_INFO("%u irq has no ring bind.\n", irq_data->irq_idx); + return false; + } + + return true; +} + +STATIC irqreturn_t sxevf_ring_irq_handler(int irq, void *data) +{ + struct sxevf_irq_data *irq_data = data; + + if (irq_data->tx.list.next || + irq_data->rx.list.next || + irq_data->tx.xdp_ring) { + napi_schedule_irqoff(&irq_data->napi); + } + + return IRQ_HANDLED ; +} + +STATIC irqreturn_t sxevf_event_irq_handler(int irq, void *data) +{ + struct sxevf_adapter *adapter = data; + struct sxevf_hw *hw = &adapter->hw; + + set_bit(SXEVF_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state); + + sxevf_monitor_work_schedule(adapter); + + hw->irq.ops->specific_irq_enable(hw, adapter->irq_ctxt.mailbox_irq); + + LOG_INFO_BDF("rcv event irq:%d\n", irq); + + return IRQ_HANDLED; +} + +static s32 sxevf_msix_request_irqs(struct sxevf_adapter *adapter) +{ + s32 ret; + u16 rx_idx = 0; + u16 tx_idx = 0; + u16 irq_idx; + struct sxevf_irq_data *irq_data; + struct msix_entry *entry; + struct net_device *netdev = adapter->netdev; + struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt; + + if (!irq_ctxt->ring_irq_num) { + ret = -SXEVF_ERR_IRQ_NUM_INVALID; + LOG_ERROR_BDF("irq_num:%d request irq fail, invalid retry open" + "need reload ko.(err:%d)\n", + irq_ctxt->ring_irq_num, ret); + goto l_out; + } + + for (irq_idx = 0; irq_idx < irq_ctxt->ring_irq_num; irq_idx++) { + irq_data = irq_ctxt->irq_data[irq_idx]; + entry = &irq_ctxt->msix_entries[irq_idx]; + + if (!(sxevf_set_irq_name(irq_data, netdev->name, + &rx_idx, &tx_idx))) { + continue; + } + + ret = request_irq(entry->vector, &sxevf_ring_irq_handler, 0, + irq_data->name, irq_data); + if (ret) { + LOG_DEV_ERR("irq_idx:%u rx_idx:%u tx_idx:%u irq_num:%u " + "vector:%u msi-x ring interrupt " + "reuqest fail.(err:%d)\n", + irq_idx, rx_idx, tx_idx, + irq_ctxt->ring_irq_num, + entry->vector, ret); + goto l_free_irq; + } + + } + + ret = request_irq(irq_ctxt->msix_entries[irq_idx].vector, + sxevf_event_irq_handler, 0, netdev->name, adapter); + if (ret) { + LOG_DEV_ERR("irq_idx:%u vector:%u msi-x other interrupt " + "reuqest fail.(err:%d)\n", + irq_idx, + irq_ctxt->msix_entries[irq_idx].vector, ret); + goto l_free_irq; + } + +l_out: + return ret; + +l_free_irq: + while (irq_idx) { + irq_idx--; + free_irq(irq_ctxt->msix_entries[irq_idx].vector, + irq_ctxt->irq_data[irq_idx]); + } + + return ret; +} + +void sxevf_configure_msix_hw(struct sxevf_adapter *adapter) +{ + u16 irq_idx; + struct sxevf_hw *hw = &adapter->hw; + struct sxevf_ring *ring; + struct sxevf_irq_context *irq_ctxt = &adapter->irq_ctxt; + irq_ctxt->irq_mask = 0; + + for (irq_idx = 0; irq_idx < irq_ctxt->ring_irq_num; irq_idx++) { + struct sxevf_irq_data *irq_data = irq_ctxt->irq_data[irq_idx]; + + sxevf_for_each_ring(ring, irq_data->rx.list) { + hw->irq.ops->ring_irq_map(hw, false, ring->reg_idx, + irq_idx); + } + + sxevf_for_each_ring(ring, irq_data->tx.list) { + hw->irq.ops->ring_irq_map(hw, 
true, ring->reg_idx, + irq_idx); + } + + if (irq_data->tx.xdp_ring) { + hw->irq.ops->ring_irq_map(hw, true, + irq_data->tx.xdp_ring->reg_idx, + irq_idx); + } + + hw->irq.ops->ring_irq_interval_set(hw, irq_idx, irq_data->irq_interval); + irq_ctxt->irq_mask |= BIT(irq_idx); + + } + + irq_ctxt->mailbox_irq = BIT(irq_idx); + irq_ctxt->irq_mask |= BIT(irq_idx); + + hw->irq.ops->event_irq_map(hw, irq_idx); + + return; +} + +static void sxevf_napi_enable_all(struct sxevf_adapter * adapter) +{ + u16 irq_idx; + + for (irq_idx = 0; irq_idx < adapter->irq_ctxt.ring_irq_num; irq_idx++) { + napi_enable(&(adapter->irq_ctxt.irq_data[irq_idx]->napi)); + } + + return; +} + +void sxevf_napi_disable(struct sxevf_adapter *adapter) +{ + u16 irq_idx; + + for (irq_idx = 0; irq_idx < adapter->irq_ctxt.ring_irq_num; irq_idx++) { + napi_disable(&adapter->irq_ctxt.irq_data[irq_idx]->napi); + } + + return; +} + +void sxevf_hw_irq_configure(struct sxevf_adapter *adapter) +{ + struct sxevf_hw *hw = &adapter->hw; + + sxevf_configure_msix_hw(adapter); + + smp_mb__before_atomic(); + clear_bit(SXEVF_DOWN, &adapter->state); + + sxevf_napi_enable_all(adapter); + + hw->irq.ops->pending_irq_clear(hw); + + hw->irq.ops->irq_enable(hw, adapter->irq_ctxt.irq_mask); + + return; +} + +s32 sxevf_irq_configure(struct sxevf_adapter *adapter) +{ + s32 ret; + + ret = sxevf_msix_request_irqs(adapter); + if (ret) { + LOG_DEV_ERR("irq_num:%d msi-x request irq failed, (err:%d)\n", + adapter->irq_ctxt.ring_irq_num, ret); + goto l_out; + } + + sxevf_hw_irq_configure(adapter); + +l_out: + return ret; +} + +static void sxevf_irq_interval_update(struct sxevf_irq_data *irq_data, + struct sxevf_irq_rate *rate) +{ + u32 bytes = rate->total_bytes; + u32 packets = rate->total_packets; + u16 old_irq_itr = irq_data->irq_interval >> SXEVF_EITR_ITR_SHIFT; + u64 bytes_rate; + u16 itr = rate->irq_interval; + + if ((packets == 0) || (old_irq_itr == 0)) { + goto l_end; + } + + bytes_rate = bytes / old_irq_itr; + switch (itr) { + case SXEVF_LOWEST_LATENCY: + if (bytes_rate > SXEVF_LOW_LATENCY_BYTE_RATE_MIN) { + itr = SXEVF_LOW_LATENCY; + } + break; + case SXEVF_LOW_LATENCY: + if (bytes_rate > SXEVF_BULK_LATENCY_BYTE_RATE_MIN) { + itr = SXEVF_BULK_LATENCY; + } else if (bytes_rate <= SXEVF_LOW_LATENCY_BYTE_RATE_MIN) { + itr = SXEVF_LOWEST_LATENCY; + } + break; + case SXEVF_BULK_LATENCY: + if (bytes_rate <= SXEVF_BULK_LATENCY_BYTE_RATE_MIN) { + itr = SXEVF_LOW_LATENCY; + } + break; + } + + rate->total_bytes = 0; + rate->total_packets = 0; + + rate->irq_interval = itr; + +l_end: + return; + +} + +static void sxevf_irq_rate_adjust(struct sxevf_irq_data *irq_data) +{ + u16 curr_itr; + u16 new_itr = irq_data->irq_interval; + struct sxevf_irq_rate *tx_rate = &irq_data->tx.irq_rate; + struct sxevf_irq_rate *rx_rate = &irq_data->rx.irq_rate; + struct sxevf_adapter *adapter = irq_data->adapter; + struct sxevf_hw *hw = &adapter->hw; + + if (irq_data->tx.list.cnt) { + sxevf_irq_interval_update(irq_data, tx_rate); + } + + if (irq_data->rx.list.cnt) { + sxevf_irq_interval_update(irq_data, rx_rate); + } + + curr_itr = max(tx_rate->irq_interval, rx_rate->irq_interval); + + switch (curr_itr) { + case SXEVF_LOWEST_LATENCY: + new_itr = SXEVF_IRQ_INTERVAL_100K; + break; + case SXEVF_LOW_LATENCY: + new_itr = SXEVF_IRQ_INTERVAL_20K; + break; + case SXEVF_BULK_LATENCY: + new_itr = SXEVF_IRQ_INTERVAL_12K; + break; + } + + if (new_itr != irq_data->irq_interval) { + new_itr = (10 * new_itr * irq_data->irq_interval) / + ((9 * new_itr) + irq_data->irq_interval); + + irq_data->irq_interval 
= new_itr; + + hw->irq.ops->ring_irq_interval_set(hw, irq_data->irq_idx, + irq_data->irq_interval); + } + + return; +} + +s32 sxevf_poll(struct napi_struct *napi, int weight) +{ + struct sxevf_irq_data *irq_data = container_of(napi, + struct sxevf_irq_data, napi); + struct sxevf_adapter *adapter = irq_data->adapter; + struct sxevf_hw *hw = &adapter->hw; + struct sxevf_ring *ring; + s32 per_ring_budget; + s32 total_cleaned = 0; + bool clean_complete = true; + u32 cleaned = 0; + + sxevf_for_each_ring(ring, irq_data->tx.list) { + if (!sxevf_tx_ring_irq_clean(irq_data, ring, weight)) { + clean_complete = false; + } + } + + ring = irq_data->tx.xdp_ring; + if (ring) { + if (!sxevf_xdp_ring_irq_clean(irq_data, ring, weight)) { + clean_complete = false; + } + } + + if (weight <= 0) { + return weight; + } + + + per_ring_budget = max(weight / irq_data->rx.list.cnt, 1); + LOG_DEBUG_BDF("weight:%d rings in irq=%u, per_ring_budget=%d\n", + weight, irq_data->rx.list.cnt, per_ring_budget); + + sxevf_for_each_ring(ring, irq_data->rx.list) { + cleaned = sxevf_rx_ring_irq_clean(irq_data, ring, + per_ring_budget); + total_cleaned += cleaned; + if (cleaned >= per_ring_budget) { + clean_complete = false; + } + } + + if (!clean_complete) { + LOG_WARN_BDF("weight:%d cleand:%u total_cleaned:%d " + " per_ring_budget:%d not complete\n", + weight, cleaned, total_cleaned, per_ring_budget); + return weight; + } + + if (likely(napi_complete_done(napi, total_cleaned))) { + LOG_INFO_BDF("weight:%d cleand:%u total_cleaned:%d per_ring_budget:%d " + " complete done\n", + weight, cleaned, total_cleaned, per_ring_budget); + if (adapter->irq_ctxt.rx_irq_interval == SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE) { + sxevf_irq_rate_adjust(irq_data); + } + + if (!test_bit(SXEVF_DOWN, &adapter->state)) { + hw->irq.ops->specific_irq_enable(hw, + BIT_ULL(irq_data->irq_idx)); + } + } + + return min(total_cleaned, weight - 1); +} diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_irq.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_irq.h new file mode 100644 index 000000000000..d0f04bdaf4e2 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_irq.h @@ -0,0 +1,105 @@ + +#ifndef __SXEVF_IRQ_H__ +#define __SXEVF_IRQ_H__ + +#include "sxevf_ring.h" + +struct ethtool_coalesce; + +#define SXEVF_NON_QUEUE_IRQ_NUM (1) +#define SXEVF_NAPI_WEIGHT (64) + +#define SXEVF_MAX_MSIX_IRQ_NUM (2) +#define SXEVF_NON_QUEUE_IRQ_NUM (1) +#define SXEVF_MIN_QUEUE_IRQ_NUM (1) +#define SXEVF_MAX_QUEUE_IRQ_NUM (SXEVF_MAX_MSIX_IRQ_NUM) +#define SXEVF_MIN_MSIX_IRQ_NUM (SXEVF_NON_QUEUE_IRQ_NUM + SXEVF_MIN_QUEUE_IRQ_NUM) + +#define SXEVF_IRQ_INTERVAL_12K (336) +#define SXEVF_IRQ_INTERVAL_20K (200) +#define SXEVF_IRQ_INTERVAL_100K (40) + +#define SXEVF_IRQ_NAME_EXT_LEN (16) + +#define SXEVF_IRQ_ITR_CONSTANT_MODE_VALUE (1) + +enum { + SXEVF_LOWEST_LATENCY = 0, + SXEVF_LOW_LATENCY, + SXEVF_BULK_LATENCY, + SXEVF_LATENCY_NR = 255, +}; + +#define SXEVF_LOW_LATENCY_BYTE_RATE_MIN 10 +#define SXEVF_BULK_LATENCY_BYTE_RATE_MIN 20 + +struct sxevf_irq_rate { + unsigned long next_update; + unsigned int total_bytes; + unsigned int total_packets; + u16 irq_interval; +}; + +struct sxevf_list { + struct sxevf_ring *next; + u8 cnt; +}; + +struct sxevf_tx_context { + struct sxevf_list list; + struct sxevf_ring *xdp_ring; + struct sxevf_irq_rate irq_rate; + u16 work_limit; +}; + +struct sxevf_rx_context { + struct sxevf_list list; + struct sxevf_irq_rate irq_rate; +}; + +struct sxevf_irq_data { + struct sxevf_adapter *adapter; + u16 irq_idx; + u16 irq_interval; + struct 
sxevf_tx_context tx; + struct sxevf_rx_context rx; + struct napi_struct napi; + struct rcu_head rcu; + s8 name[IFNAMSIZ + SXEVF_IRQ_NAME_EXT_LEN]; + struct sxevf_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +struct sxevf_irq_context { + struct msix_entry *msix_entries; + struct sxevf_irq_data *irq_data[SXEVF_MAX_QUEUE_IRQ_NUM]; + u16 ring_irq_num; + u16 total_irq_num; + u16 rx_irq_interval; + u16 tx_irq_interval; + u32 irq_mask; + u32 mailbox_irq; +}; + +s32 sxevf_poll(struct napi_struct *napi, int weight); + +void sxevf_irq_ctxt_exit(struct sxevf_adapter *adapter); + +s32 sxevf_irq_ctxt_init(struct sxevf_adapter *adapter); + +void sxevf_irq_release(struct sxevf_adapter *adapter); + +void sxevf_hw_irq_configure(struct sxevf_adapter *adapter); + +s32 sxevf_irq_configure(struct sxevf_adapter *adapter); + +void sxevf_hw_irq_disable(struct sxevf_adapter *adapter); + +void sxevf_napi_disable(struct sxevf_adapter *adapter); + +void sxevf_configure_msix_hw(struct sxevf_adapter *adapter); + +s32 sxevf_irq_coalesce_set(struct net_device *netdev, struct ethtool_coalesce *user); + +s32 sxevf_irq_coalesce_get(struct net_device * netdev, + struct ethtool_coalesce *user); +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_main.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_main.c new file mode 100644 index 000000000000..7b599723b746 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_main.c @@ -0,0 +1,796 @@ +#include +#include +#include +#include +#include +#include + +#include "sxe_version.h" +#include "sxe_log.h" +#include "sxevf_netdev.h" +#include "sxevf.h" +#include "sxevf_pci.h" +#include "sxevf_ring.h" +#include "sxevf_irq.h" +#include "sxevf_msg.h" + +#define SXEVF_MSG_LEVEL_DEFAULT (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +#define SXEVF_WAIT_RST_DONE_TIMES 200 + +static struct workqueue_struct *sxevf_wq; +struct net_device *g_netdev; + +void sxevf_start_adapter(struct sxevf_adapter *adapter) +{ + ether_addr_copy(adapter->mac_filter_ctxt.cur_uc_addr, + adapter->mac_filter_ctxt.def_uc_addr); + clear_bit(SXEVF_HW_STOP, &adapter->hw.state); + + return; +} + +s32 sxevf_dev_reset(struct sxevf_hw *hw) +{ + u32 retry = SXEVF_RST_CHECK_NUM; + s32 ret; + struct sxevf_rst_msg msg = {}; + struct sxevf_adapter *adapter = hw->adapter; + + set_bit(SXEVF_HW_STOP, &hw->state); + + hw->setup.ops->hw_stop(hw); + + adapter->mbx_version = SXEVF_MBX_API_10; + hw->setup.ops->reset(hw); + + if (hw->board_type == SXE_BOARD_VF_HV) { + retry = SXEVF_RST_CHECK_NUM_HV; + } + + while (!sxevf_pf_rst_check(hw) && retry) { + retry--; + udelay(5); + } + if (!retry) { + ret = -SXEVF_ERR_RESET_FAILED; + LOG_ERROR_BDF("retry use up, pf has not reset done.(err:%d)\n", ret); + goto l_out; + } + + retry = SXEVF_WAIT_RST_DONE_TIMES; + while (!hw->setup.ops->reset_done(hw) && retry) { + retry--; + msleep(50); + } + if (!retry) { + ret = -SXEVF_ERR_RESET_FAILED; + LOG_ERROR_BDF("retry use up, vflr has not reset done.(err:%d)\n", ret); + goto l_out; + } + + hw->mbx.retry = SXEVF_MBX_RETRY_COUNT; + + msg.msg_type = SXEVF_RESET; + + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, + SXEVF_MSG_NUM(sizeof(msg))); + + if (ret) { + LOG_ERROR_BDF("vf reset msg:%d len:%zu mailbox fail.(err:%d)\n", + msg.msg_type, SXEVF_MSG_NUM(sizeof(msg)), ret); + goto l_out; + } + + sxevf_sw_mtu_set(adapter, msg.sw_mtu); + + if (msg.msg_type == (SXEVF_RESET | SXEVF_MSGTYPE_ACK)) { + ether_addr_copy(adapter->mac_filter_ctxt.def_uc_addr, + (u8 *)(msg.mac_addr)); + } else if (msg.msg_type != (SXEVF_RESET | 
SXEVF_MSGTYPE_NACK)) { + ret = -SXEVF_ERR_MAC_ADDR_INVALID; + LOG_ERROR_BDF("pf handle vf reset msg fail, rcv msg:0x%x.(err:%d)\n", + msg.msg_type, ret); + goto l_out; + } + + adapter->mac_filter_ctxt.mc_filter_type = msg.mc_fiter_type; + + LOG_INFO_BDF("vf get mc filter type:%d default mac addr:%pM from pf " + "sw_mtu:%u.\n", + adapter->mac_filter_ctxt.mc_filter_type, + adapter->mac_filter_ctxt.def_uc_addr, + msg.sw_mtu); + +l_out: + return ret; +} + +STATIC int sxevf_config_dma_mask(struct sxevf_adapter * adapter) +{ + int ret = 0; + + if (dma_set_mask_and_coherent(&adapter->pdev->dev, + DMA_BIT_MASK(SXEVF_DMA_BIT_WIDTH_64))) { + LOG_ERROR_BDF("device[pci_id %u] 64 dma mask and coherent set failed\n", + adapter->pdev->dev.id); + ret = dma_set_mask_and_coherent(&adapter->pdev->dev, + DMA_BIT_MASK(SXEVF_DMA_BIT_WIDTH_32)); + if (ret) { + LOG_DEV_ERR("device[pci_id %u] 32 dma mask and coherent set failed\n", + adapter->pdev->dev.id); + } + } + + return ret; +} + +void sxevf_mbx_api_version_init(struct sxevf_adapter *adapter) +{ + s32 ret; + struct sxevf_hw *hw = &adapter->hw; + static const int api[] = { + SXEVF_MBX_API_14, + SXEVF_MBX_API_13, + SXEVF_MBX_API_12, + SXEVF_MBX_API_11, + SXEVF_MBX_API_10, + SXEVF_MBX_API_NR + }; + u32 idx = 0; + struct sxevf_mbx_api_msg msg; + + spin_lock_bh(&adapter->mbx_lock); + + while (api[idx] != SXEVF_MBX_API_NR) { + msg.msg_type = SXEVF_API_NEGOTIATE; + msg.api_version = api[idx]; + + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg))); + if (!ret && (msg.msg_type == (SXEVF_API_NEGOTIATE | SXEVF_MSGTYPE_ACK))) { + adapter->mbx_version = api[idx]; + break; + } else { + idx++; + } + } + + spin_unlock_bh(&adapter->mbx_lock); + + LOG_INFO_BDF("mbx version:%u.\n", adapter->mbx_version); + return; +} + +STATIC int sxevf_pci_init(struct sxevf_adapter * adapter) +{ + int ret; + size_t len; + resource_size_t bar_base_paddr; + struct pci_dev *pdev = adapter->pdev; + + ret = pci_enable_device(pdev); + if (ret) { + LOG_ERROR_BDF("device[pci_id %u] pci enable failed\n", pdev->dev.id); + goto l_pci_enable_device_mem_failed; + } + + ret = pci_request_regions(pdev, SXEVF_DRV_NAME); + if (ret) { + LOG_DEV_ERR("device[pci_id %u] request IO memory failed\n", pdev->dev.id); + goto l_pci_request_mem_failed; + } + + pci_set_master(pdev); + pci_save_state(pdev); + + bar_base_paddr = pci_resource_start(pdev, 0); + len = pci_resource_len(pdev, 0); + adapter->hw.reg_base_addr = ioremap(bar_base_paddr, len); + if (!adapter->hw.reg_base_addr) { + ret = -EIO; + LOG_ERROR_BDF("device[pci_id %u] ioremap[bar_base_paddr = 0x%llx, len = %zu] failed\n", + pdev->dev.id, (u64)bar_base_paddr, len); + goto l_ioremap_failed; + } else { + pci_set_drvdata(pdev, adapter); + } + + LOG_INFO_BDF("bar_base_paddr = 0x%llx, bar len = %zu, reg_base_addr = %p\n", + (u64)bar_base_paddr, len, adapter->hw.reg_base_addr); + return 0; + +l_ioremap_failed: + pci_release_regions(pdev); +l_pci_request_mem_failed: + pci_disable_device(pdev); +l_pci_enable_device_mem_failed: + return ret; +} + +STATIC void sxevf_pci_exit(struct sxevf_adapter * adapter) +{ + if (adapter->hw.reg_base_addr) { + iounmap(adapter->hw.reg_base_addr); + adapter->hw.reg_base_addr = NULL; + } + + if (pci_is_enabled(adapter->pdev)) { + pci_release_regions(adapter->pdev); + pci_disable_device(adapter->pdev); + pci_set_drvdata(adapter->pdev, NULL); + } + + return; +} + +STATIC struct sxevf_adapter *sxevf_adapter_create(struct pci_dev *pdev) +{ + struct net_device *netdev; + struct sxevf_adapter * adapter = NULL; + + 
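+	/* descriptive note: the net_device private area holds struct sxevf_adapter (retrieved below via netdev_priv()), and the queue count passed to alloc_etherdev_mq() is the driver's SXEVF_TXRX_RING_NUM_MAX upper bound */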
netdev = alloc_etherdev_mq(sizeof(struct sxevf_adapter), + SXEVF_TXRX_RING_NUM_MAX); + if (!netdev) { + LOG_ERROR("max:%d device[pci_id %u] sxe net device alloc failed\n", + SXEVF_TXRX_RING_NUM_MAX, pdev->dev.id); + goto l_netdev_alloc_failed; + } + + adapter = netdev_priv(netdev); + adapter->pdev = pdev; + adapter->netdev = netdev; + adapter->msg_enable = netif_msg_init(-1, + SXEVF_MSG_LEVEL_DEFAULT); + + LOG_INFO_BDF("adapter:0x%pK netdev:0x%pK pdev:0x%pK\n", adapter, netdev, pdev); + +l_netdev_alloc_failed: + return adapter; +} + +static inline u32 sxevf_readl(const volatile void *reg) +{ + return readl(reg); +} + +static inline void sxevf_writel(u32 value, volatile void *reg) +{ + writel(value, reg); + return; +} + +STATIC int sxevf_hw_base_init(struct sxevf_adapter *adapter) +{ + int ret; + struct sxevf_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + hw->adapter = adapter; + adapter->mbx_version = SXEVF_MBX_API_10; + + sxevf_hw_ops_init(hw); + sxevf_hw_reg_handle_init(hw, sxevf_readl, sxevf_writel); + + sxevf_mbx_init(hw); + spin_lock_init(&adapter->mbx_lock); + + ret = sxevf_dev_reset(hw); + if (ret) { + LOG_DEV_WARN("vf reset fail during probe.(err:%d)\n", ret); + } else { + sxevf_start_adapter(adapter); + sxevf_mbx_api_version_init(adapter); + + ether_addr_copy(adapter->mac_filter_ctxt.cur_uc_addr, adapter->mac_filter_ctxt.def_uc_addr); + if (is_zero_ether_addr(adapter->mac_filter_ctxt.cur_uc_addr)) { + LOG_DEV_INFO("vf reset done, but pf don't assign valid" + " mac addr for vf.\n"); + } +#ifndef HAVE_ETH_HW_ADDR_SET_API + ether_addr_copy(netdev->dev_addr, adapter->mac_filter_ctxt.cur_uc_addr); +#else + eth_hw_addr_set(netdev, adapter->mac_filter_ctxt.cur_uc_addr); +#endif + } + + if (!is_valid_ether_addr(netdev->dev_addr)) { + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->mac_filter_ctxt.cur_uc_addr, netdev->dev_addr); + ether_addr_copy(adapter->mac_filter_ctxt.def_uc_addr, netdev->dev_addr); + + LOG_DEV_INFO("vf use random mac addr:%pM.\n", + adapter->mac_filter_ctxt.def_uc_addr); + } + + adapter->link.link_enable = true; + + return 0; +} + +static void sxevf_sw_base_init1(struct sxevf_adapter *adapter) +{ + set_bit(SXEVF_DOWN, &adapter->state); + + sxevf_ring_feature_init(adapter); + + return; +} + +s32 sxevf_ring_irq_init(struct sxevf_adapter *adapter) +{ + s32 ret; + + sxevf_ring_num_set(adapter); + + ret = sxevf_irq_ctxt_init(adapter); + if (ret) { + LOG_ERROR_BDF("interrupt context init fail.(err:%d)\n", ret); + } + + LOG_DEV_DEBUG("Multiqueue %s: Rx Queue count = %u, " + "Tx Queue count = %u XDP Queue count %u\n", + (adapter->rx_ring_ctxt.num > 1) ? 
"Enabled" : "Disabled", + adapter->rx_ring_ctxt.num, adapter->tx_ring_ctxt.num, + adapter->xdp_ring_ctxt.num); + + return ret; +} + +void sxevf_ring_irq_exit(struct sxevf_adapter *adapter) +{ + sxevf_irq_ctxt_exit(adapter); + + return; +} + +void sxevf_save_reset_stats(struct sxevf_adapter *adapter) +{ + struct sxevf_hw_stats *stats = &adapter->stats.hw; + + if (stats->vfgprc || stats->vfgptc) { + stats->saved_reset_vfgprc += stats->vfgprc - stats->base_vfgprc; + stats->saved_reset_vfgptc += stats->vfgptc - stats->base_vfgptc; + stats->saved_reset_vfgorc += stats->vfgorc - stats->base_vfgorc; + stats->saved_reset_vfgotc += stats->vfgotc - stats->base_vfgotc; + stats->saved_reset_vfmprc += stats->vfmprc - stats->base_vfmprc; + } + + return; +} + +void sxevf_last_counter_stats_init(struct sxevf_adapter *adapter) +{ + struct sxevf_hw_stats *stats = &adapter->stats.hw; + struct sxevf_hw *hw = &adapter->hw; + + hw->stat.ops->stats_init_value_get(hw, stats); + + adapter->stats.hw.base_vfgprc = stats->last_vfgprc; + adapter->stats.hw.base_vfgorc = stats->last_vfgorc; + adapter->stats.hw.base_vfgptc = stats->last_vfgptc; + adapter->stats.hw.base_vfgotc = stats->last_vfgotc; + adapter->stats.hw.base_vfmprc = stats->last_vfmprc; + + return; +} + +static void sxevf_sw_base_init2(struct sxevf_adapter *adapter) +{ + sxevf_monitor_init(adapter); + +#ifdef SXE_IPSEC_CONFIGURE + sxevf_ipsec_offload_init(adapter); +#endif + + sxevf_last_counter_stats_init(adapter); + return; +} + +STATIC int sxevf_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int ret; + struct sxevf_adapter *adapter; + const char *device_name = dev_name(&pdev->dev); + + adapter = sxevf_adapter_create(pdev); + if (!adapter) { + LOG_ERROR("adapter create failed.\n"); + ret = -ENOMEM; + goto l_adapter_create_failed; + } + + strlcpy(adapter->dev_name, device_name, + min_t(u32, strlen(device_name) + 1, DEV_NAME_LEN)); + adapter->hw.board_type = id ? 
id->driver_data : SXE_BOARD_VF; + + ret = sxevf_pci_init(adapter); + if (ret) { + LOG_ERROR_BDF("pci init failed.(ret:%d)\n", ret); + goto l_pci_init_failed; + } + + ret = sxevf_config_dma_mask(adapter); + if (ret) { + LOG_ERROR_BDF("config dma mask failed.(ret:%d)\n", ret); + goto l_config_dma_mask_failed; + } + + sxevf_netdev_init(adapter, pdev); + + ret = sxevf_hw_base_init(adapter); + if (ret) { + LOG_ERROR_BDF("hardware base init failed.(ret:%d)\n", ret); + goto l_config_dma_mask_failed; + } + + sxevf_sw_base_init1(adapter); + + ret = sxevf_ring_irq_init(adapter); + if (ret) { + LOG_ERROR_BDF("interrupt ring assign scheme init failed, err=%d\n", ret); + goto l_config_dma_mask_failed; + } + + sxevf_sw_base_init2(adapter); + + strcpy(adapter->netdev->name, "eth%d"); + ret = register_netdev(adapter->netdev); + if (ret) { + LOG_ERROR_BDF("register netdev failed.(ret:%d)\n", ret); + goto l_irq_init_failed; + } + + set_bit(SXEVF_DOWN, &adapter->state); + netif_carrier_off(adapter->netdev); + + LOG_DEV_INFO("%pM\n", adapter->netdev->dev_addr); + LOG_DEV_INFO("%s %s %s %s %s vf deviceId:0x%x mbx version:%u " + " probe done.\n", + dev_driver_string(pdev->dev.parent), + dev_name(pdev->dev.parent), + netdev_name(adapter->netdev), + dev_driver_string(&pdev->dev), + dev_name(&pdev->dev), + pdev->device, + adapter->mbx_version); + + return 0; + +l_irq_init_failed: + sxevf_ring_irq_exit(adapter); +l_config_dma_mask_failed: + sxevf_pci_exit(adapter); +l_pci_init_failed: + free_netdev(adapter->netdev); +l_adapter_create_failed: + return ret; + +} + +static void sxevf_fuc_exit(struct sxevf_adapter *adapter) +{ + cancel_work_sync(&adapter->monitor_ctxt.work); + + return; +} + +STATIC void sxevf_remove(struct pci_dev *pdev) +{ + struct sxevf_adapter * adapter = pci_get_drvdata(pdev); + struct net_device *netdev; + + LOG_INFO_BDF("sxevf remove.\n"); + if (!adapter) { + goto l_end; + } + + set_bit(SXEVF_REMOVING, &adapter->state); + netdev = adapter->netdev; + + sxevf_fuc_exit(adapter); + + if (NETREG_REGISTERED == netdev->reg_state) { + unregister_netdev(netdev); + } + +#ifdef SXE_IPSEC_CONFIGURE + sxevf_ipsec_offload_exit(adapter); +#endif + + sxevf_irq_ctxt_exit(adapter); + + sxevf_pci_exit(adapter); + LOG_DEV_DEBUG("remove sxevf complete\n"); + + free_netdev(netdev); + + LOG_INFO("%s %s %s %s deviceId:0x%x remove done.\n", + dev_driver_string(pdev->dev.parent), + dev_name(pdev->dev.parent), + dev_driver_string(&pdev->dev), + dev_name(&pdev->dev), + pdev->device); + +l_end: + return; +} + +STATIC s32 sxevf_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sxevf_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + s32 ret = 0; + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) { + sxevf_terminate(adapter); + } + + sxevf_ring_irq_exit(adapter); + rtnl_unlock(); + +#ifdef CONFIG_PM + ret = pci_save_state(pdev); + if (ret) { + LOG_ERROR_BDF("save pci state fail.(err:%d)\n", ret); + return ret; + } +#endif + + if (!test_and_set_bit(SXEVF_DISABLED, &adapter->state)) { + pci_disable_device(pdev); + } + + return ret; +} + +#ifdef CONFIG_PM + STATIC s32 sxevf_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sxevf_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + s32 ret; + + pci_restore_state(pdev); + pci_save_state(pdev); + + ret = pci_enable_device_mem(pdev); + if (ret) { + LOG_DEV_ERR("enable pci device from suspend fail.(err:%d)", ret); + 
goto l_end; + } + + smp_mb__before_atomic(); + clear_bit(SXEVF_DISABLED, &adapter->state); + pci_set_master(pdev); + sxevf_reset(adapter); + + rtnl_lock(); + sxevf_ring_num_set(adapter); + ret = sxevf_irq_ctxt_init(adapter); + if (!ret && netif_running(netdev)) { + ret = sxevf_open(netdev); + } + rtnl_unlock(); + + if (ret) { + LOG_ERROR_BDF("pci device resume fail.(err:%d)\n", ret); + goto l_end; + } + + netif_device_attach(netdev); + +l_end: + return ret; +} +#endif + +STATIC void sxevf_shutdown(struct pci_dev *pdev) +{ + sxevf_suspend(&pdev->dev); + + return; +} + +STATIC void sxevf_io_resume(struct pci_dev *pdev) +{ + struct sxevf_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + + LOG_DEBUG_BDF("oops,vf pci dev[%p] got io resume\n",pdev); + + rtnl_lock(); + if (netif_running(netdev)) { + LOG_DEBUG_BDF("netdev running resume adapter.\n"); + sxevf_open(netdev); + } + + netif_device_attach(netdev); + rtnl_unlock(); + + LOG_INFO_BDF("vf pci dev[%p] io resume done.\n",pdev); + + return; +} + +STATIC pci_ers_result_t sxevf_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct sxevf_adapter *adapter = netdev_priv(netdev); + pci_ers_result_t ret; + + LOG_INFO_BDF("oops, vf pci dev[%p] got io slot reset\n",pdev); + + if (pci_enable_device_mem(pdev)) { + LOG_DEV_ERR("cannot re-enable PCI device after reset.\n"); + ret = PCI_ERS_RESULT_DISCONNECT; + goto l_out; + } + + smp_mb__before_atomic(); + clear_bit(SXEVF_DISABLED, &adapter->state); + pci_set_master(pdev); + + sxevf_reset(adapter); + ret = PCI_ERS_RESULT_RECOVERED; + +l_out: + LOG_INFO_BDF("vf pci dev[%p] io slot reset done. ret=0x%x\n", + pdev, (u32)ret); + return ret; +} + +STATIC pci_ers_result_t sxevf_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct sxevf_adapter *adapter = netdev_priv(netdev); + pci_ers_result_t ret; + + LOG_DEBUG_BDF("oops,vf pci dev[%p] got io error detect, state=0x%x\n", + pdev, (u32)state); + + if (!test_bit(SXEVF_MONITOR_WORK_INITED, &adapter->state)) { + LOG_ERROR_BDF("vf monitor not inited\n"); + ret = PCI_ERS_RESULT_DISCONNECT; + goto l_out; + } + + rtnl_lock(); + netif_device_detach(netdev); + + if (netif_running(netdev)) { + sxevf_terminate(adapter); + } + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + ret = PCI_ERS_RESULT_DISCONNECT; + goto l_out; + } + + if (!test_and_set_bit(SXEVF_DISABLED, &adapter->state)) { + LOG_DEBUG_BDF("vf set disabled\n"); + pci_disable_device(pdev); + } + + rtnl_unlock(); + + ret = PCI_ERS_RESULT_NEED_RESET; + +l_out: + LOG_INFO_BDF("vf detected io error detected end, ret=0x%x.\n", ret); + return ret; +} + +static const struct pci_device_id sxevf_pci_tbl[] = { + {PCI_VENDOR_ID_STARS, SXEVF_DEV_ID_ASIC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SXE_BOARD_VF}, + {PCI_VENDOR_ID_STARS, SXEVF_DEV_ID_ASIC_HV, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SXE_BOARD_VF_HV}, + {0, } +}; + +static const struct pci_error_handlers sxevf_err_handler = { + .error_detected = sxevf_io_error_detected, + .slot_reset = sxevf_io_slot_reset, + .resume = sxevf_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(sxevf_pm_ops, sxevf_suspend, sxevf_resume); +static struct pci_driver sxevf_pci_driver = { + .name = SXEVF_DRV_NAME, + .id_table = sxevf_pci_tbl, + .probe = sxevf_probe, + .remove = sxevf_remove, + .driver.pm = &sxevf_pm_ops, + .shutdown = sxevf_shutdown, + .err_handler = &sxevf_err_handler, +}; + +STATIC int __init sxevf_init(void) +{ + int 
ret; + + LOG_PRVF_INFO("version[%s], commit_id[%s]," + "branch[%s], build_time[%s]\n", + SXE_VERSION, + SXE_COMMIT_ID, + SXE_BRANCH, + SXE_BUILD_TIME); + +#ifndef SXE_DRIVER_RELEASE + ret = sxe_log_init(true); + if (ret < 0) { + LOG_PRVF_ERR("sxe log init fail.(err:%d)\n", ret); + goto l_end; + } +#endif + + sxevf_wq = create_singlethread_workqueue(SXEVF_DRV_NAME); + if (!sxevf_wq) { + LOG_PRVF_ERR("failed to create workqueue\n"); + ret = -ENOMEM; + goto l_log_exit; + } + + ret = pci_register_driver(&sxevf_pci_driver); + if (ret) { + LOG_ERROR("%s driver register fail.(err:%d)\n", + sxevf_pci_driver.name, ret); + goto l_pci_register_driver_failed; + } + + LOG_INFO("pci driver:%s init done.\n", sxevf_pci_driver.name); + + return 0; + +l_pci_register_driver_failed: + destroy_workqueue(sxevf_wq); + sxevf_wq = NULL; + +l_log_exit: + +#ifndef SXE_DRIVER_RELEASE + sxe_log_exit(); +l_end: +#endif + return ret; +} + +struct workqueue_struct *sxevf_wq_get(void) +{ + return sxevf_wq; +} + +STATIC void __exit sxevf_exit(void) +{ + pci_unregister_driver(&sxevf_pci_driver); + + if (sxevf_wq) { + destroy_workqueue(sxevf_wq); + sxevf_wq = NULL; + } + + LOG_INFO("pci driver:%s exit done.\n", sxevf_pci_driver.name); + +#ifndef SXE_DRIVER_RELEASE + sxe_log_exit(); +#endif + + return; +} + +MODULE_DEVICE_TABLE(pci, sxevf_pci_tbl); +MODULE_INFO(build_time, SXE_BUILD_TIME); +MODULE_INFO(branch, SXE_BRANCH); +MODULE_INFO(commit_id, SXE_COMMIT_ID); +MODULE_DESCRIPTION(SXEVF_DRV_DESCRIPTION); +MODULE_AUTHOR(SXEVF_DRV_AUTHOR); +MODULE_VERSION(SXE_VERSION); +MODULE_LICENSE(SXE_DRV_LICENSE); + +module_init(sxevf_init); +module_exit(sxevf_exit); + diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_monitor.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_monitor.c new file mode 100644 index 000000000000..603ba7f89983 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_monitor.c @@ -0,0 +1,417 @@ +#include + +#include "sxevf.h" +#include "sxevf_netdev.h" +#include "sxevf_monitor.h" +#include "sxevf_ethtool.h" +#include "sxevf_msg.h" + +#define SXEVF_CHECK_LINK_TIMER_PERIOD (HZ / 10) +#define SXEVF_NORMAL_TIMER_PERIOD (HZ * 2) + +#define SXEVF_CHECK_LINK_CYCLE_CNT (5) +#define SXEVF_CHECK_LINK_DELAY_TIME (100) + +void sxevf_task_timer_trigger(struct sxevf_adapter *adapter) +{ + set_bit(SXEVF_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state); + LOG_DEBUG_BDF("link check requester, state=%lx, monitor_state=%lx, is_up=%d\n", + adapter->state, adapter->monitor_ctxt.state, adapter->link.is_up); + + mod_timer(&adapter->monitor_ctxt.timer, jiffies); + + return; +} + +void sxevf_monitor_work_schedule(struct sxevf_adapter *adapter) +{ + struct workqueue_struct *wq = sxevf_wq_get(); + + if (!test_bit(SXEVF_DOWN, &adapter->state) && + !test_bit(SXEVF_REMOVING, &adapter->state) && + !test_and_set_bit(SXEVF_MONITOR_WORK_SCHED, + &adapter->monitor_ctxt.state)) { + + queue_work(wq, &adapter->monitor_ctxt.work); + } + + return; +} + +static void sxevf_timer_cb(struct timer_list *timer) +{ + struct sxevf_monitor_context *monitor = container_of(timer, struct sxevf_monitor_context, + timer); + struct sxevf_adapter *adapter = container_of(monitor, struct sxevf_adapter, + monitor_ctxt); + unsigned long period ; + + if (test_bit(SXEVF_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state)) { + period = SXEVF_CHECK_LINK_TIMER_PERIOD; + } else { + period = SXEVF_NORMAL_TIMER_PERIOD; + } + + mod_timer(&adapter->monitor_ctxt.timer, period + jiffies); + + sxevf_monitor_work_schedule(adapter); + + return; +} + +static 
void sxevf_monitor_work_complete(struct sxevf_adapter *adapter) +{ + BUG_ON(!test_bit(SXEVF_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state)); + + smp_mb__before_atomic(); + clear_bit(SXEVF_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state); + + return; +} + +STATIC s32 sxevf_ctrl_msg_check(struct sxevf_adapter *adapter) +{ + struct sxevf_hw *hw = &adapter->hw; + struct sxevf_ctrl_msg ctrl_msg; + s32 ret; + + spin_lock_bh(&adapter->mbx_lock); + ret = sxevf_ctrl_msg_rcv_and_clear(hw, (u32 *)&ctrl_msg, + SXEVF_MSG_NUM(sizeof(struct sxevf_ctrl_msg))); + spin_unlock_bh(&adapter->mbx_lock); + if (ret) { + LOG_ERROR_BDF("ctrl msg rcv fail due to lock fail.(err:%d)\n", ret); + goto l_end; + } + + if (ctrl_msg.msg_type & SXEVF_PF_CTRL_MSG_REINIT) { + adapter->link.need_reinit = true; + clear_bit(SXEVF_NETDEV_DOWN, &adapter->monitor_ctxt.state); + LOG_WARN_BDF("rcv ctrl msg:0x%x need reinit vf.\n", + ctrl_msg.msg_type); + } else if (ctrl_msg.msg_type & SXEVF_PF_CTRL_MSG_NETDEV_DOWN) { + adapter->link.is_up = false; + + set_bit(SXEVF_NETDEV_DOWN, &adapter->monitor_ctxt.state); + LOG_WARN_BDF("rcv ctrl msg:0x%x need link down.\n", ctrl_msg.msg_type); + } else if (ctrl_msg.msg_type & SXEVF_PF_CTRL_MSG_LINK_UPDATE) { + adapter->link.is_up = true; + LOG_WARN_BDF("rcv ctrl msg:0x%x physical link up.\n", ctrl_msg.msg_type); + } + +l_end: + return ret; +} + +static void sxevf_physical_link_check(struct sxevf_adapter *adapter) +{ + u32 link_reg, i; + u32 msg; + struct sxevf_hw *hw = &adapter->hw; + + spin_lock_bh(&adapter->mbx_lock); + sxevf_ctrl_msg_rcv(hw, &msg, 1); + spin_unlock_bh(&adapter->mbx_lock); + + link_reg = hw->setup.ops->link_state_get(hw); + if (!(link_reg & SXE_VFLINKS_UP)) { + adapter->link.is_up = false; + goto l_end; + } + + for (i = 0; i < SXEVF_CHECK_LINK_CYCLE_CNT; i++) { + udelay(SXEVF_CHECK_LINK_DELAY_TIME); + link_reg = hw->setup.ops->link_state_get(hw); + if (!(link_reg & SXE_VFLINKS_UP)) { + adapter->link.is_up = false; + goto l_end; + } + } + + switch (link_reg & SXE_VFLINKS_SPEED) { + case SXE_VFLINKS_SPEED_10G: + adapter->link.speed = SXEVF_LINK_SPEED_10GB_FULL; + break; + case SXE_VFLINKS_SPEED_1G: + adapter->link.speed = SXEVF_LINK_SPEED_1GB_FULL; + break; + case SXE_VFLINKS_SPEED_100: + adapter->link.speed = SXEVF_LINK_SPEED_100_FULL; + break; + } + + adapter->link.is_up = true; + +l_end: + LOG_INFO_BDF("link up status:%d.\n", adapter->link.is_up); + return; +} + +STATIC void sxevf_link_up_handle(struct sxevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (netif_carrier_ok(netdev)) { + goto l_end; + } + + LOG_DEV_INFO("NIC %s %s link state, down to up, speed=%s\n", + netdev_name(adapter->netdev), + dev_name(&adapter->pdev->dev), + (adapter->link.speed == SXEVF_LINK_SPEED_10GB_FULL) ? + "10 Gbps" : + (adapter->link.speed == SXEVF_LINK_SPEED_1GB_FULL) ? + "1 Gbps" : + (adapter->link.speed == SXEVF_LINK_SPEED_100_FULL) ? 
+ "100 Mbps" : + "unknown speed"); + + netif_carrier_on(netdev); + +l_end: + return; +} + +static void sxevf_link_down_handle(struct sxevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->link.speed = 0; + + if (netif_carrier_ok(netdev)) { + LOG_DEV_INFO("NIC %s %s link state, up to down\n", + netdev_name(adapter->netdev), + dev_name(&adapter->pdev->dev)); + netif_carrier_off(netdev); + } + + return; +} + +static void sxevf_detect_link_work(struct sxevf_adapter *adapter) +{ + struct sxevf_hw *hw = &adapter->hw; + s32 ret; + + if (test_bit(SXEVF_DOWN, &adapter->state) || + test_bit(SXEVF_RESETTING, &adapter->state)) { + goto l_end; + } + + if (!sxevf_pf_rst_check(hw) || !hw->mbx.retry || + (test_bit(SXEVF_NETDEV_DOWN, &adapter->monitor_ctxt.state))) { + LOG_WARN_BDF("checked pf reset not done or someone timeout:%d " + "monitor state:0x%lx.\n", + hw->mbx.retry, adapter->monitor_ctxt.state); + set_bit(SXEVF_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state); + } + + if (!test_bit(SXEVF_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state)) { + goto l_end; + } + + sxevf_physical_link_check(adapter); + + if (adapter->link.is_up) { + ret = sxevf_ctrl_msg_check(adapter); + if (ret) { + LOG_ERROR_BDF("ctrl msg rcv fail, try to next workqueue.\n"); + goto l_end; + } + + if (adapter->link.need_reinit || !hw->mbx.retry) { + adapter->link.need_reinit = false; + adapter->link.is_up = false; + set_bit(SXEVF_RESET_REQUESTED, &adapter->monitor_ctxt.state); + } + } + + if (adapter->link.is_up) { + clear_bit(SXEVF_LINK_CHECK_REQUESTED, &adapter->monitor_ctxt.state); + if(adapter->link.link_enable){ + sxevf_link_up_handle(adapter); + } + } else { + sxevf_link_down_handle(adapter); + } + +l_end: + return; +} + +static void sxevf_reset_work(struct sxevf_adapter *adapter) +{ + if (!test_and_clear_bit(SXEVF_RESET_REQUESTED, &adapter->monitor_ctxt.state)) { + goto l_end; + } + + rtnl_lock(); + if (test_bit(SXEVF_DOWN, &adapter->state) || + test_bit(SXEVF_REMOVING, &adapter->state) || + test_bit(SXEVF_RESETTING, &adapter->state)) { + rtnl_unlock(); + goto l_end; + } + + LOG_ERROR_BDF("reset adapter\n"); + adapter->stats.sw.tx_timeout_count++; + + sxevf_hw_reinit(adapter); + rtnl_unlock(); + +l_end: + return; +} + +static void sxevf_check_hang_work(struct sxevf_adapter *adapter) +{ + u32 i; + u64 eics = 0; + struct sxevf_irq_data *irq_priv; + struct sxevf_hw *hw = &adapter->hw; + struct sxevf_ring **tx_ring = adapter->tx_ring_ctxt.ring; + struct sxevf_ring **xdp_ring = adapter->xdp_ring_ctxt.ring; + + if (test_bit(SXEVF_DOWN, &adapter->state) || + test_bit(SXEVF_RESETTING, &adapter->state)) { + + goto l_end; + } + + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) + SXEVF_TX_HANG_PROC_ACTIVE(tx_ring[i]); + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) + SXEVF_TX_HANG_PROC_ACTIVE(xdp_ring[i]); + } + + for (i = 0; i < adapter->irq_ctxt.ring_irq_num; i++) { + irq_priv = adapter->irq_ctxt.irq_data[i]; + if (irq_priv->tx.list.next || + irq_priv->rx.list.next) + eics |= BIT(i); + } + + hw->irq.ops->ring_irq_trigger(hw, eics); + + LOG_INFO_BDF("set check hang flag ok eics:0x%llx\n", eics); + +l_end: + return; +} + +static void sxevf_stats_update_work(struct sxevf_adapter *adapter) +{ + if (test_bit(SXEVF_DOWN, &adapter->state) || + test_bit(SXEVF_RESETTING, &adapter->state)) { + return; + } + + sxevf_update_stats(adapter); + + return; +} + +static s32 sxevf_hw_fault_handle_task(struct sxevf_adapter *adapter) +{ + s32 ret = 0; + + if 
(sxevf_is_hw_fault(&adapter->hw)) { + if (!test_bit(SXEVF_DOWN, &adapter->state)) { + rtnl_lock(); + sxevf_down(adapter); + rtnl_unlock(); + } + + LOG_ERROR_BDF("sxe nic fault\n"); + ret = -EFAULT; + } + + return ret; +} + +STATIC void sxevf_ring_reassign_work(struct sxevf_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + + if (!test_and_clear_bit(SXEVF_RING_REASSIGN_REQUESTED, + &adapter->monitor_ctxt.state)) { + goto l_end; + } + + if (test_bit(SXEVF_DOWN, &adapter->state) || + test_bit(SXEVF_RESETTING, &adapter->state)) { + goto l_end; + } + + rtnl_lock(); + + if (netif_running(dev)) { + sxevf_close(dev); + } + + sxevf_ring_irq_exit(adapter); + sxevf_ring_irq_init(adapter); + + if (netif_running(dev)) { + sxevf_open(dev); + } + + rtnl_unlock(); + +l_end: + return; +} + +STATIC void sxevf_work_cb(struct work_struct *work) +{ + struct sxevf_monitor_context *monitor = container_of(work, struct sxevf_monitor_context, + work); + struct sxevf_adapter *adapter = container_of(monitor, struct sxevf_adapter, + monitor_ctxt); + + if (sxevf_hw_fault_handle_task(adapter)) { + goto l_end; + } + + sxevf_ring_reassign_work(adapter); + sxevf_reset_work(adapter); + sxevf_detect_link_work(adapter); + sxevf_stats_update_work(adapter); + sxevf_check_hang_work(adapter); + +l_end: + sxevf_monitor_work_complete(adapter); + + return; +} + +static void sxevf_hw_fault_task_trigger(void *priv) +{ + struct sxevf_adapter *adapter = (struct sxevf_adapter *)priv; + + if (test_bit(SXEVF_MONITOR_WORK_INITED, + &adapter->monitor_ctxt.state)) { + sxevf_monitor_work_schedule(adapter); + LOG_ERROR_BDF("sxe vf nic fault, submit monitor task and " + "perform the down operation\n"); + } + + return; +} + +void sxevf_monitor_init(struct sxevf_adapter *adapter) +{ + struct sxevf_hw *hw = &adapter->hw; + + timer_setup(&adapter->monitor_ctxt.timer, sxevf_timer_cb, 0); + + INIT_WORK(&adapter->monitor_ctxt.work, sxevf_work_cb); + + set_bit(SXEVF_MONITOR_WORK_INITED, &adapter->monitor_ctxt.state); + clear_bit(SXEVF_MONITOR_WORK_SCHED, &adapter->monitor_ctxt.state); + + sxevf_hw_fault_handle_init(hw, sxevf_hw_fault_task_trigger, adapter); + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_monitor.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_monitor.h new file mode 100644 index 000000000000..753415477cf0 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_monitor.h @@ -0,0 +1,42 @@ +#ifndef __SXEVF_MONITOR_H__ +#define __SXEVF_MONITOR_H__ + +struct sxevf_adapter; + +enum sxevf_monitor_task_state { + SXEVF_MONITOR_WORK_INITED, + + SXEVF_MONITOR_WORK_SCHED, + + SXEVF_RESET_REQUESTED, + + SXEVF_LINK_CHECK_REQUESTED, + + SXEVF_RING_REASSIGN_REQUESTED, + + SXEVF_NETDEV_DOWN, +}; + +struct sxevf_monitor_context { + struct timer_list timer; + struct work_struct work; + unsigned long state; +}; + +struct sxevf_link_info { + u8 is_up :1; + u8 need_reinit :1; + u8 link_enable :1; + u8 reservd :5; + u32 speed; + + unsigned long check_timeout; +}; + +void sxevf_task_timer_trigger(struct sxevf_adapter *adapter); + +void sxevf_monitor_init(struct sxevf_adapter *adapter); + +void sxevf_monitor_work_schedule(struct sxevf_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_msg.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_msg.c new file mode 100644 index 000000000000..b18f536f9f4a --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_msg.c @@ -0,0 +1,817 @@ +#include +#include + +#include "sxevf_hw.h" +#include "sxevf_msg.h" 
+#include "sxevf.h" +#include "sxevf_regs.h" +#include "sxe_log.h" + +#define SXEVF_PFMSG_MASK 0xFF00 + +#define SXEVF_RESET_DELAY_TIME 10 +#define SXEVF_SYNC_UCADDR_DELAY_TIME 200 + +#define SXEVF_REDIR_TBL_ENTRY_NUM_PER_UINT 16 +#define SXEVF_REDIR_TBL_ENTRY_BITS 2 + +void sxevf_mbx_init(struct sxevf_hw *hw) +{ + hw->mbx.msg_len = SXEVF_MBX_MSG_NUM; + + hw->mbx.stats.rcv_msgs = 0; + hw->mbx.stats.send_msgs = 0; + hw->mbx.stats.acks = 0; + hw->mbx.stats.reqs = 0; + hw->mbx.stats.rsts = 0; + + hw->mbx.retry = 0; + hw->mbx.interval = SXEVF_MBX_RETRY_INTERVAL; + + return; +} + +static u32 sxevf_mbx_reg_read(struct sxevf_hw *hw) +{ + u32 value = hw->mbx.ops->mailbox_read(hw); + + value |= hw->mbx.reg_value; + + hw->mbx.reg_value |= value & SXE_VFMAILBOX_RC_BIT; + + return value; +} + +static bool sxevf_mbx_bit_check(struct sxevf_hw *hw, u32 mask) +{ + bool ret = false; + u32 value = sxevf_mbx_reg_read(hw); + + if (value & mask) { + ret = true; + } + + hw->mbx.reg_value &= ~mask; + + return ret; +} + +STATIC bool sxevf_pf_msg_check(struct sxevf_hw *hw) +{ + bool ret = false; + + if (sxevf_mbx_bit_check(hw, SXE_VFMAILBOX_PFSTS)) { + hw->mbx.stats.reqs++; + ret = true; + } + + return ret; +} + +STATIC bool sxevf_pf_ack_check(struct sxevf_hw *hw) +{ + bool ret = false; + + if (sxevf_mbx_bit_check(hw, SXE_VFMAILBOX_PFACK)) { + hw->mbx.stats.acks++; + ret = true; + } + + return ret; +} + +bool sxevf_pf_rst_check(struct sxevf_hw *hw) +{ + bool ret = false; + + if (!sxevf_mbx_bit_check(hw, (SXE_VFMAILBOX_RSTI | + SXE_VFMAILBOX_RSTD))) { + hw->mbx.stats.rsts++; + ret = true; + } + + return ret; +} + +STATIC s32 sxevf_mailbox_lock(struct sxevf_hw *hw) +{ + u32 mailbox; + u32 retry = SXEVF_MBX_RETRY_COUNT; + s32 ret = -SXEVF_ERR_MBX_LOCK_FAIL; + + while (retry--) { + mailbox = sxevf_mbx_reg_read(hw); + mailbox |= SXE_VFMAILBOX_VFU; + hw->mbx.ops->mailbox_write(hw, mailbox); + + if (sxevf_mbx_reg_read(hw) && SXE_VFMAILBOX_VFU) { + ret = 0; + break; + } + + udelay(hw->mbx.interval); + } + + return ret; +} + +static void sxevf_mailbox_unlock(struct sxevf_hw *hw) +{ + u32 mailbox; + + mailbox = sxevf_mbx_reg_read(hw); + mailbox &= ~SXE_VFMAILBOX_VFU; + hw->mbx.ops->mailbox_write(hw, mailbox); + + return; +} + +STATIC bool sxevf_msg_poll(struct sxevf_hw *hw) +{ + struct sxevf_mbx_info *mbx = &hw->mbx; + u32 retry = mbx->retry; + bool ret = true; + struct sxevf_adapter *adapter = hw->adapter; + + while (!sxevf_pf_msg_check(hw) && retry) { + retry--; + udelay(mbx->interval); + } + + if (!retry) { + LOG_ERROR_BDF("retry:%d use up, but don't check pf reply," + " clear retry to 0\n", + mbx->retry); + mbx->retry = 0; + ret = false; + } + + return ret; +} + +STATIC bool sxevf_ack_poll(struct sxevf_hw *hw) +{ + struct sxevf_mbx_info *mbx = &hw->mbx; + u32 retry = mbx->retry; + bool ret = true; + struct sxevf_adapter *adapter = hw->adapter; + + while (!sxevf_pf_ack_check(hw) && retry) { + retry--; + udelay(mbx->interval); + } + + if (!retry) { + LOG_ERROR_BDF("send msg to pf, retry:%d but don't check pf ack, " + "init mbx retry to 0.\n", + mbx->retry); + mbx->retry = 0; + ret = false; + } + + return ret; +} + +static void sxevf_pf_msg_and_ack_clear(struct sxevf_hw *hw) +{ + struct sxevf_adapter *adapter = hw->adapter; + + LOG_INFO_BDF("clear pending pf msg and ack.\n"); + + sxevf_pf_msg_check(hw); + sxevf_pf_ack_check(hw); + + return; +} + +static s32 sxevf_send_msg_to_pf(struct sxevf_hw *hw, u32 *msg, u16 msg_len) +{ + struct sxevf_mbx_info *mbx = &hw->mbx; + s32 ret = 0; + u16 i; + u32 old; + struct sxevf_adapter 
*adapter = hw->adapter; + + if (!mbx->retry) { + ret = -SXEVF_ERR_NOT_READY; + LOG_ERROR_BDF("msg:0x%x len:%d send fail due to retry:0.(err:%d)\n", + msg[0], msg_len, ret); + goto l_out; + } + + if (msg_len > mbx->msg_len) { + ret = -EINVAL; + LOG_ERROR_BDF("vf msg:0x%x len:%d exceed limit:%d " + "send fail.(err:%d)\n", + msg[0], msg_len, mbx->msg_len, ret); + goto l_out; + } + + ret = sxevf_mailbox_lock(hw); + if (ret) { + LOG_ERROR_BDF("msg:0x%x len:%d send lock mailbox fail.(err:%d)\n", + msg[0], msg_len, ret); + goto l_out; + } + + sxevf_pf_msg_and_ack_clear(hw); + + old = hw->mbx.ops->msg_read(hw, 0); + msg[0] |= (old & SXEVF_PFMSG_MASK); + + for (i = 0; i < msg_len; i++) { + hw->mbx.ops->msg_write(hw, i, msg[i]); + } + + hw->mbx.ops->pf_req_irq_trigger(hw); + + hw->mbx.stats.send_msgs++; + + if (!sxevf_ack_poll(hw)) { + ret = -SXEVF_ERR_POLL_ACK_FAIL; + LOG_ERROR_BDF("msg:0x%x len:%d send done, but don't poll ack.\n", + msg[0], msg_len); + goto l_out; + } + + LOG_INFO_BDF("vf send msg:0x%x len:%d to pf and polled pf ack done." + "stats send_msg:%d ack:%d.\n", + msg[0], msg_len, + mbx->stats.send_msgs, mbx->stats.acks); + +l_out: + return ret; +} + +s32 sxevf_mbx_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len) +{ + u32 i; + u16 msg_entry; + s32 ret = 0; + struct sxevf_mbx_info *mbx = &hw->mbx; + struct sxevf_adapter *adapter = hw->adapter; + + msg_entry = (msg_len > mbx->msg_len) ? mbx->msg_len : msg_len; + + ret = sxevf_mailbox_lock(hw); + if (ret) { + LOG_ERROR_BDF("size:%d rcv lock mailbox fail.(err:%d)\n", + msg_entry, ret); + goto l_end; + } + + for (i = 0; i < msg_entry; i++) { + msg[i] = hw->mbx.ops->msg_read(hw, i); + } + + msg[0] &= ~SXEVF_PFMSG_MASK; + + hw->mbx.ops->pf_ack_irq_trigger(hw); + + mbx->stats.rcv_msgs++; +l_end: + return ret; + +} + +s32 sxevf_ctrl_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len) +{ + u16 i; + u16 msg_entry; + s32 ret = 0; + struct sxevf_mbx_info *mbx = &hw->mbx; + struct sxevf_adapter *adapter = hw->adapter; + + msg_entry = (msg_len > mbx->msg_len) ? mbx->msg_len : msg_len; + + ret = sxevf_mailbox_lock(hw); + if (ret) { + LOG_ERROR_BDF("size:%d rcv lock mailbox fail.(err:%d)\n", + msg_entry, ret); + goto l_end; + } + + for (i = 0; i < msg_entry; i++) { + msg[i] = hw->mbx.ops->msg_read(hw, i); + } + + sxevf_mailbox_unlock(hw); + + LOG_INFO_BDF("rcv pf mailbox msg:0x%x.\n", *msg); + + mbx->stats.rcv_msgs++; +l_end: + return ret; +} + +s32 sxevf_ctrl_msg_rcv_and_clear(struct sxevf_hw *hw, u32 *msg, u16 msg_len) +{ + u16 i; + u16 msg_entry; + s32 ret = 0; + u32 clear; + struct sxevf_mbx_info *mbx = &hw->mbx; + struct sxevf_adapter *adapter = hw->adapter; + + msg_entry = (msg_len > mbx->msg_len) ? 
mbx->msg_len : msg_len; + + ret = sxevf_mailbox_lock(hw); + if (ret) { + LOG_ERROR_BDF("size:%d rcv lock mailbox fail.(err:%d)\n", + msg_entry, ret); + goto l_end; + } + + for (i = 0; i < msg_entry; i++) { + msg[i] = hw->mbx.ops->msg_read(hw, i); + } + + clear = msg[0] & (~SXEVF_PFMSG_MASK); + hw->mbx.ops->msg_write(hw, 0, clear); + + sxevf_mailbox_unlock(hw); + + LOG_INFO_BDF("rcv pf mailbox msg:0x%x.\n", *msg); + + mbx->stats.rcv_msgs++; +l_end: + return ret; +} + +static s32 sxevf_rcv_msg_from_pf(struct sxevf_hw *hw, u32 *msg, u16 msg_len) +{ + s32 ret; + struct sxevf_adapter *adapter = hw->adapter; + + if (!sxevf_msg_poll(hw)) { + ret = -SXEVF_ERR_POLL_MSG_FAIL; + LOG_ERROR_BDF("retry:%d poll pf msg fail.\n", hw->mbx.retry); + goto l_out; + } + + ret = sxevf_mbx_msg_rcv(hw, msg, msg_len); + if (ret < 0) { + LOG_ERROR_BDF("retry:%d read msg fail.\n", hw->mbx.retry); + goto l_out; + } + + LOG_INFO_BDF("vf polled msg:0x%x from pf and rcv pf msg done. " + "stats req:%d rcv_msg:%d\n", + msg[0], hw->mbx.stats.reqs, hw->mbx.stats.rcv_msgs); + +l_out: + return ret; +} + +s32 sxevf_send_and_rcv_msg(struct sxevf_hw *hw, u32 *msg, u8 msg_len) +{ + s32 ret; + u16 msg_type = msg[0] & 0xFF; + struct sxevf_adapter *adapter = hw->adapter; + + ret = sxevf_send_msg_to_pf(hw, msg, msg_len); + if (ret) { + LOG_ERROR_BDF("msg:0x%x len:%u msg send fail.(err:%d).\n", + msg[0], msg_len, ret); + goto l_out; + } + + if (msg_type == SXEVF_RESET) { + mdelay(SXEVF_RESET_DELAY_TIME); + } + + ret = sxevf_rcv_msg_from_pf(hw, msg, msg_len); + if (ret) { + LOG_ERROR_BDF("msg:0x%x len:%u rcv fail.(err:%d).\n", + msg[0], msg_len, ret); + goto l_out; + } + +l_out: + return ret; +} + +STATIC s32 __sxevf_uc_addr_set(struct sxevf_hw *hw, u8 *uc_addr) +{ + s32 ret; + struct sxevf_uc_addr_msg msg = {}; + struct sxevf_adapter *adapter = hw->adapter; + + msg.msg_type = SXEVF_DEV_MAC_ADDR_SET; + ether_addr_copy(msg.uc_addr, uc_addr); + + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg))); + if (!ret && (msg.msg_type == + (SXEVF_DEV_MAC_ADDR_SET | SXEVF_MSGTYPE_NACK))) { + ether_addr_copy(adapter->mac_filter_ctxt.cur_uc_addr, adapter->mac_filter_ctxt.def_uc_addr); + ret = -EPERM; + LOG_ERROR_BDF("msg:0x%x uc addr:%pM replyed nack.\n", + msg.msg_type, uc_addr); + goto l_out; + } + + if (ret) { + LOG_ERROR_BDF("msg:0x%x uc addr:%pM set fail.(err:%d)\n", + msg.msg_type, uc_addr, ret); + ret = -EPERM; + goto l_out; + } + + LOG_INFO_BDF("msg:0x%x uc addr:%pM set success.\n", msg.msg_type, uc_addr); + +l_out: + return ret; +} + +STATIC s32 __sxevf_hv_uc_addr_set(struct sxevf_hw *hw, u8 *uc_addr) +{ + struct sxevf_adapter *adapter = hw->adapter; + s32 ret = -EOPNOTSUPP; + + if (ether_addr_equal(uc_addr, adapter->mac_filter_ctxt.def_uc_addr)) { + ret = 0; + } + + return ret; +} + +s32 sxevf_uc_addr_set(struct sxevf_hw *hw, u8 *uc_addr) +{ + s32 ret; + + if (hw->board_type == SXE_BOARD_VF_HV) { + ret = __sxevf_hv_uc_addr_set(hw, uc_addr); + } else { + ret = __sxevf_uc_addr_set(hw, uc_addr); + } + + return ret; +} + +STATIC s32 __sxevf_cast_mode_set(struct sxevf_hw *hw, enum sxevf_cast_mode mode) +{ + struct sxevf_cast_mode_msg msg = {}; + s32 ret; + struct sxevf_adapter *adapter = hw->adapter; + + msg.msg_type = SXEVF_CAST_MODE_SET; + msg.cast_mode = mode; + + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg))); + if (ret || (msg.msg_type != (SXEVF_CAST_MODE_SET | SXEVF_MSGTYPE_ACK))) { + ret = ret ? 
ret : -SXEVF_ERR_MSG_HANDLE_ERR; + } + + LOG_INFO_BDF("msg_type:0x%x mode:0x%x msg result:0x%x.(ret:%d)\n", + msg.msg_type, mode, msg.msg_type, ret); + + return ret; +} + +STATIC s32 __sxevf_hv_cast_mode_set(struct sxevf_hw *hw, enum sxevf_cast_mode mode) +{ + return -EOPNOTSUPP; +} + +s32 sxevf_cast_mode_set(struct sxevf_hw *hw, enum sxevf_cast_mode mode) +{ + s32 ret; + + if (hw->board_type == SXE_BOARD_VF_HV) { + ret = __sxevf_hv_cast_mode_set(hw, mode); + } else { + ret = __sxevf_cast_mode_set(hw, mode); + } + + return ret; + +} + +static u16 sxevf_mc_addr_extract(u8 *mc_addr) +{ + u16 result = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + + result &= 0xFFF; + + LOG_INFO("extract result:0x%03x mc_addr:%pM\n", result, mc_addr); + + return result; +} + +STATIC s32 __sxevf_mc_addr_sync(struct sxevf_hw *hw, struct net_device *netdev) +{ + struct netdev_hw_addr *hw_addr; + struct sxevf_mc_sync_msg msg = {}; + u16 mc_cnt = min_t(u16, netdev_mc_count(netdev), SXEVF_MC_ENTRY_NUM_MAX); + s32 ret; + u8 i = 0; + u32 result; + struct sxevf_adapter *adapter = hw->adapter; + + msg.msg_type = SXEVF_MC_ADDR_SYNC; + msg.mc_cnt = mc_cnt; + + netdev_for_each_mc_addr(hw_addr, netdev) { + if ((i < mc_cnt) && + !is_link_local_ether_addr(hw_addr->addr)) { + msg.mc_addr_extract[i++] = sxevf_mc_addr_extract(hw_addr->addr); + } + } + + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg))); + result = *(u32 *)&msg; + + if (ret || ((result & SXEVF_MC_ADDR_SYNC) && + (result & SXEVF_MSGTYPE_NACK))) { + ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR; + goto l_out; + } + + LOG_INFO_BDF("msg_type:0x%x len:%zu mc_cnt:%d msg " + "result:0x%x.(ret:%d)\n", + msg.msg_type, SXEVF_MSG_NUM(sizeof(msg)), + mc_cnt, result, ret); + +l_out: + return ret; +} + +STATIC s32 __sxevf_hv_mc_addr_sync(struct sxevf_hw *hw, struct net_device *netdev) +{ + return -EOPNOTSUPP; +} + +s32 sxevf_mc_addr_sync(struct sxevf_hw *hw, struct net_device *netdev) +{ + s32 ret; + + if (hw->board_type == SXE_BOARD_VF_HV) { + ret = __sxevf_hv_mc_addr_sync(hw, netdev); + } else { + ret = __sxevf_mc_addr_sync(hw, netdev); + } + + return ret; +} + +STATIC s32 __sxevf_per_uc_addr_sync(struct sxevf_hw *hw, u16 index, u8 *addr) +{ + struct sxevf_uc_sync_msg msg = {}; + s32 ret; + u32 result; + u32 check; + struct sxevf_adapter *adapter = hw->adapter; + + msg.msg_type = SXEVF_UC_ADDR_SYNC; + msg.index = index; + check = *(u32 *)&msg; + + if (addr) { + ether_addr_copy((u8 *)&msg.addr, addr); + } + + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg))); + result = *(u32 *)&msg; + + if (ret || (result != (check | SXEVF_MSGTYPE_ACK))) { + ret = ret ? 
ret : -SXEVF_ERR_MSG_HANDLE_ERR; + } + + LOG_INFO_BDF("msg_type:0x%x index:%d addr:%pM sync done " + " result:0x%x msg.(ret:%d)\n", + msg.msg_type, index, addr, result, ret); + + return ret; +} + +STATIC s32 __sxevf_hv_per_uc_addr_sync(struct sxevf_hw *hw, u16 index, u8 *addr) +{ + return -EOPNOTSUPP; +} + +static s32 sxevf_per_uc_addr_sync(struct sxevf_hw *hw, u16 index, u8 *addr) +{ + s32 ret; + + if (hw->board_type == SXE_BOARD_VF_HV) { + ret = __sxevf_hv_per_uc_addr_sync(hw, index, addr); + } else { + ret = __sxevf_per_uc_addr_sync(hw, index, addr); + } + + return ret; +} + +s32 sxevf_uc_addr_sync(struct sxevf_hw *hw, struct net_device *netdev) +{ + u32 index = 0; + u32 uc_cnt = netdev_uc_count(netdev); + s32 ret = 0; + struct sxevf_adapter *adapter = hw->adapter; + + if (uc_cnt > SXEVF_UC_ENTRY_NUM_MAX) { + ret = -SXEVF_ERR_ARGUMENT_INVALID; + LOG_DEV_ERR("dev uc list cnt:%d exceed limit:10.(err:%d)\n", + uc_cnt, ret); + goto l_out; + } + + if (uc_cnt) { + struct netdev_hw_addr *hw_addr; + LOG_INFO_BDF("uc_cnt:%u.\n", uc_cnt); + netdev_for_each_uc_addr(hw_addr, netdev) { + sxevf_per_uc_addr_sync(hw, ++index, hw_addr->addr); + udelay(SXEVF_SYNC_UCADDR_DELAY_TIME); + } + } else { + LOG_INFO_BDF("dev uc list null, send msg to pf to clear vf mac list.\n"); + sxevf_per_uc_addr_sync(hw, 0, NULL); + } + +l_out: + return ret; +} + +STATIC s32 __sxevf_redir_tbl_get(struct sxevf_hw *hw, int rx_ring_num, u32 *redir_tbl) +{ + s32 ret; + u32 i, j; + u32 mask = 0; + struct sxevf_redir_tbl_msg msg = {}; + struct sxevf_adapter *adapter = hw->adapter; + + msg.type = SXEVF_REDIR_TBL_GET; + + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg))); + if (ret) { + LOG_ERROR_BDF("send and rece err, ret=%d\n", ret); + goto l_end; + } + + if ((msg.type & SXEVF_REDIR_TBL_GET) && (msg.type & SXEVF_MSGTYPE_NACK)) { + LOG_ERROR_BDF("req has been refused, msg type=%x\n", msg.type); + ret = -EPERM; + goto l_end; + } + + if (rx_ring_num > 1) { + mask = 0x1; + } + + for (i = 0; i < SXEVF_RETA_ENTRIES_DWORDS; i++) { + for (j = 0; j < SXEVF_REDIR_TBL_ENTRY_NUM_PER_UINT; j++) { + redir_tbl[i * SXEVF_REDIR_TBL_ENTRY_NUM_PER_UINT + j] = + (msg.entries[i] >> (SXEVF_REDIR_TBL_ENTRY_BITS * j)) & mask; + } + } + +l_end: + return ret; +} + +STATIC s32 __sxevf_hv_redir_tbl_get(struct sxevf_hw *hw, int rx_ring_num, u32 *redir_tbl) +{ + return -EOPNOTSUPP; +} + +s32 sxevf_redir_tbl_get(struct sxevf_hw *hw, int rx_ring_num, u32 *redir_tbl) +{ + s32 ret; + + if (hw->board_type == SXE_BOARD_VF_HV) { + ret = __sxevf_hv_redir_tbl_get(hw, rx_ring_num, redir_tbl); + } else { + ret = __sxevf_redir_tbl_get(hw, rx_ring_num, redir_tbl); + } + + return ret; +} + +STATIC s32 __sxevf_rss_hash_key_get(struct sxevf_hw *hw, u8 *rss_key) +{ + s32 ret; + struct sxevf_adapter *adapter = hw->adapter; + + struct sxevf_rss_hsah_key_msg msg; + msg.type = SXEVF_RSS_KEY_GET; + + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, SXEVF_MSG_NUM(sizeof(msg))); + if (ret) { + LOG_ERROR_BDF("send and rece err, ret=%d\n", ret); + goto l_end; + } + + if ((msg.type & SXEVF_RSS_KEY_GET) && (msg.type & SXEVF_MSGTYPE_NACK)) { + LOG_ERROR_BDF("req has been refused, msg type=%x\n", msg.type); + ret = -EPERM; + goto l_end; + } + + memcpy(rss_key, msg.hash_key, SXEVF_RSS_HASH_KEY_SIZE); + +l_end: + return ret; +} + +STATIC s32 __sxevf_hv_rss_hash_key_get(struct sxevf_hw *hw, u8 *rss_key) +{ + return -EOPNOTSUPP; +} + +s32 sxevf_rss_hash_key_get(struct sxevf_hw *hw, u8 *rss_key) +{ + s32 ret; + + if (hw->board_type == SXE_BOARD_VF_HV) { + ret = 
__sxevf_hv_rss_hash_key_get(hw, rss_key); + } else { + ret = __sxevf_rss_hash_key_get(hw, rss_key); + } + + return ret; +} + +STATIC s32 __sxevf_rx_max_frame_set(struct sxevf_hw *hw, u32 max_size) +{ + struct sxevf_max_frame_msg msg = {}; + s32 ret; + struct sxevf_adapter *adapter = hw->adapter; + + msg.msg_type = SXEVF_LPE_SET; + msg.max_frame = max_size; + + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, + SXEVF_MSG_NUM(sizeof(msg))); + if (ret || ((msg.msg_type & SXEVF_LPE_SET) && + (msg.msg_type & SXEVF_MSGTYPE_NACK))) { + ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR; + } + + LOG_INFO_BDF("msg_type:0x%x max_frame:0x%x (ret:%d)\n", + msg.msg_type, msg.max_frame, ret); + + return ret; +} + +STATIC s32 __sxevf_hv_rx_max_frame_set(struct sxevf_hw *hw, u32 max_size) +{ + return 0; +} + +s32 sxevf_rx_max_frame_set(struct sxevf_hw *hw, u32 max_size) +{ + s32 ret; + + if (hw->board_type == SXE_BOARD_VF_HV) { + ret = __sxevf_hv_rx_max_frame_set(hw, max_size); + } else { + ret = __sxevf_rx_max_frame_set(hw, max_size); + } + + return ret; +} + +STATIC s32 __sxe_vf_filter_array_vid_update(struct sxevf_hw *hw, u32 vlan, + bool vlan_on) +{ + struct sxevf_vlan_filter_msg msg = {}; + s32 ret; + struct sxevf_adapter *adapter = hw->adapter; + + msg.msg_type = SXEVF_VLAN_SET; + msg.vlan_id = vlan; + msg.msg_type |= vlan_on << SXEVF_MSGINFO_SHIFT; + + LOG_INFO_BDF("update vlan[%u], vlan on = %s\n", vlan, vlan_on ? "yes" : "no"); + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, + SXEVF_MSG_NUM(sizeof(msg))); + LOG_INFO_BDF("update vlan[%u] ret = %d\n",vlan, ret); + + msg.msg_type &= ~(0xFF << SXEVF_MSGINFO_SHIFT); + + if (ret || (msg.msg_type != (SXEVF_VLAN_SET | SXEVF_MSGTYPE_ACK))) { + ret = ret ? ret : -SXEVF_ERR_MSG_HANDLE_ERR; + } + + return ret; +} + +STATIC s32 __sxevf_hv_filter_array_vid_update(struct sxevf_hw *hw, u32 vlan, bool vlan_on) +{ + return -EOPNOTSUPP; +} + +s32 sxe_vf_filter_array_vid_update(struct sxevf_hw *hw, u32 vlan, + bool vlan_on) +{ + s32 ret; + + if (hw->board_type == SXE_BOARD_VF_HV) { + ret = __sxevf_hv_filter_array_vid_update(hw, vlan, vlan_on); + } else { + ret = __sxe_vf_filter_array_vid_update(hw, vlan, vlan_on); + } + + return ret; +} + diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_msg.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_msg.h new file mode 100644 index 000000000000..bc1a41c83d01 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_msg.h @@ -0,0 +1,198 @@ +#ifndef __SXEVF_MSG_H__ +#define __SXEVF_MSG_H__ + +struct sxevf_adapter; + +#define SXEVF_UC_ENTRY_NUM_MAX 10 +#define SXEVF_MC_ENTRY_NUM_MAX 30 + +#define SXEVF_MBX_MSG_NUM 16 +#define SXEVF_MBX_RETRY_INTERVAL 500 +#define SXEVF_MBX_RETRY_COUNT 2000 + +#define SXEVF_RST_CHECK_NUM 200 +#define SXEVF_RST_CHECK_NUM_HV 200000 + +#define SXEVF_DEFAULT_ADDR_LEN 4 +#define SXEVF_MC_FILTER_TYPE_WORD 3 + +#define SXEVF_RESET 0x01 +#define SXEVF_DEV_MAC_ADDR_SET 0x02 +#define SXEVF_MC_ADDR_SYNC 0x03 +#define SXEVF_VLAN_SET 0x04 +#define SXEVF_LPE_SET 0x05 + +#define SXEVF_UC_ADDR_SYNC 0x06 + +#define SXEVF_API_NEGOTIATE 0x08 + +#define SXEVF_RING_INFO_GET 0x09 + +#define SXEVF_REDIR_TBL_GET 0x0a +#define SXEVF_RSS_KEY_GET 0x0b +#define SXEVF_CAST_MODE_SET 0x0c +#define SXEVF_LINK_ENABLE_GET 0X0d +#define SXEVF_IPSEC_ADD 0x0e +#define SXEVF_IPSEC_DEL 0x0f +#define SXEVF_RSS_CONF_GET 0x10 + +#define SXEVF_PF_CTRL_MSG_LINK_UPDATE 0x100 +#define SXEVF_PF_CTRL_MSG_NETDEV_DOWN 0x200 + +#define SXEVF_PF_CTRL_MSG_REINIT 0x400 + +#define SXEVF_PF_CTRL_MSG_MASK 0x700 +#define SXEVF_PFREQ_MASK 
0xFF00 + +#define SXEVF_RSS_HASH_KEY_SIZE (40) +#define SXEVF_MAX_RETA_ENTRIES (128) +#define SXEVF_RETA_ENTRIES_DWORDS (SXEVF_MAX_RETA_ENTRIES / 16) + +#define SXEVF_TX_QUEUES 1 +#define SXEVF_RX_QUEUES 2 +#define SXEVF_TRANS_VLAN 3 +#define SXEVF_DEF_QUEUE 4 + +#define SXEVF_MSGTYPE_ACK 0x80000000 +#define SXEVF_MSGTYPE_NACK 0x40000000 + +#define SXEVF_MSGINFO_SHIFT 16 +#define SXEVF_MSGINFO_MASK (0xFF << SXEVF_MSGINFO_SHIFT) + +#define SXEVF_MSG_NUM(size) DIV_ROUND_UP(size, 4) + +enum sxevf_mbx_api_version { + SXEVF_MBX_API_10 = 0, + SXEVF_MBX_API_11, + SXEVF_MBX_API_12, + SXEVF_MBX_API_13, + SXEVF_MBX_API_14, + + SXEVF_MBX_API_NR, +}; + +enum sxevf_cast_mode { + SXEVF_CAST_MODE_NONE = 0, + SXEVF_CAST_MODE_MULTI, + SXEVF_CAST_MODE_ALLMULTI, + SXEVF_CAST_MODE_PROMISC, +}; + +struct sxevf_rst_msg { + u32 msg_type; + u32 mac_addr[2]; + u32 mc_fiter_type; + u32 sw_mtu; +}; + +struct sxevf_mbx_api_msg { + u32 msg_type; + u32 api_version; +}; + +struct sxevf_ring_info_msg { + u32 msg_type; + u8 max_rx_num; + u8 max_tx_num; + u8 tc_num; + u8 default_tc; +}; + +struct sxevf_uc_addr_msg { + u32 msg_type; + u8 uc_addr[ETH_ALEN]; + u16 pad; +}; + +struct sxevf_cast_mode_msg { + u32 msg_type; + u32 cast_mode; +}; + +struct sxevf_mc_sync_msg { + u16 msg_type; + u16 mc_cnt; + u16 mc_addr_extract[SXEVF_MC_ENTRY_NUM_MAX]; +}; + +struct sxevf_uc_sync_msg { + u16 msg_type; + u16 index; + u32 addr[2]; +}; + +struct sxevf_max_frame_msg { + u32 msg_type; + u32 max_frame; +}; + +struct sxevf_vlan_filter_msg { + u32 msg_type; + u32 vlan_id; +}; + +struct sxevf_redir_tbl_msg { + u32 type; + u32 entries[SXEVF_RETA_ENTRIES_DWORDS]; +}; + +struct sxevf_rss_hsah_key_msg { + u32 type; + u8 hash_key[SXEVF_RSS_HASH_KEY_SIZE]; +}; + +struct sxevf_ipsec_add_msg { + u32 msg_type; + u32 pf_sa_idx; + __be32 spi; + u8 flags; + u8 proto; + u16 family; + __be32 addr[4]; + u32 key[5]; +}; + +struct sxevf_ipsec_del_msg { + u32 msg_type; + u32 sa_idx; +}; + +struct sxevf_link_enable_msg { + u32 msg_type; + bool link_enable; +}; + +struct sxevf_ctrl_msg { + u32 msg_type; +}; + +s32 sxevf_redir_tbl_get(struct sxevf_hw *hw, int rx_ring_num, u32 *redir_tbl); + +s32 sxevf_rss_hash_key_get(struct sxevf_hw *hw, u8 *rss_key); + +s32 sxevf_mbx_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len); + +s32 sxevf_ctrl_msg_rcv(struct sxevf_hw *hw, u32 *msg, u16 msg_len); + +s32 sxevf_send_and_rcv_msg(struct sxevf_hw *hw, u32 *msg, u8 msg_len); + +s32 sxevf_uc_addr_set(struct sxevf_hw *hw, u8 *uc_addr); + +s32 sxevf_cast_mode_set(struct sxevf_hw *hw, enum sxevf_cast_mode mode); + +s32 sxevf_mc_addr_sync(struct sxevf_hw *hw, struct net_device *netdev); + +s32 sxevf_uc_addr_sync(struct sxevf_hw *hw, struct net_device *netdev); + +void sxevf_mbx_init(struct sxevf_hw *hw); + +bool sxevf_pf_rst_check(struct sxevf_hw *hw); + +s32 sxevf_rx_max_frame_set(struct sxevf_hw *hw, u32 max_size); + +s32 sxe_vf_filter_array_vid_update(struct sxevf_hw *hw, u32 vlan, + bool vlan_on); + +s32 sxevf_ctrl_msg_rcv_and_clear(struct sxevf_hw *hw, u32 *msg, u16 msg_len); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_netdev.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_netdev.c new file mode 100644 index 000000000000..8110f1c8b88f --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_netdev.c @@ -0,0 +1,820 @@ + +#include +#include +#include + +#include "sxevf_netdev.h" +#include "sxevf_hw.h" +#include "sxe_log.h" +#include "sxevf_pci.h" +#include "sxevf.h" +#include "sxevf_irq.h" +#include "sxevf_msg.h" +#include 
"sxevf_monitor.h" +#include "sxevf_tx_proc.h" +#include "sxevf_rx_proc.h" +#include "sxevf_hw.h" +#include "sxevf_ethtool.h" +#include "sxevf_debug.h" +#include "sxevf_ipsec.h" +#include "sxevf_xdp.h" + +#define SXEVF_MAX_MAC_HDR_LEN (127) +#define SXEVF_MAX_NETWORK_HDR_LEN (511) + +#define SXEVF_HW_DISABLE_SLEEP_TIME_MIN 10000 +#define SXEVF_HW_DISABLE_SLEEP_TIME_MAX 20000 + +void sxevf_reset(struct sxevf_adapter *adapter) +{ + struct sxevf_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + if (sxevf_dev_reset(hw)) { + LOG_DEV_ERR("reset still fail\n"); + } else { + sxevf_start_adapter(adapter); + sxevf_mbx_api_version_init(adapter); + LOG_DEBUG_BDF("hw reset finished\n"); + } + + if (is_valid_ether_addr(adapter->mac_filter_ctxt.cur_uc_addr)) { +#ifndef HAVE_ETH_HW_ADDR_SET_API + ether_addr_copy(netdev->dev_addr, + adapter->mac_filter_ctxt.cur_uc_addr); +#else + eth_hw_addr_set(netdev, adapter->mac_filter_ctxt.cur_uc_addr); +#endif + ether_addr_copy(netdev->perm_addr, + adapter->mac_filter_ctxt.cur_uc_addr); + } + + adapter->link.check_timeout = jiffies; + + return; +} + +static void sxevf_dev_mac_addr_set(struct sxevf_adapter *adapter) +{ + struct sxevf_hw *hw = &adapter->hw; + + spin_lock_bh(&adapter->mbx_lock); + + if (is_valid_ether_addr(adapter->mac_filter_ctxt.cur_uc_addr)) { + sxevf_uc_addr_set(hw, adapter->mac_filter_ctxt.cur_uc_addr); + } else { + sxevf_uc_addr_set(hw, adapter->mac_filter_ctxt.def_uc_addr); + } + + spin_unlock_bh(&adapter->mbx_lock); + + return; +} + +STATIC s32 sxevf_mac_addr_set(struct net_device *netdev, void *p) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + struct sxevf_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + s32 ret; + + if (!is_valid_ether_addr(addr->sa_data)) { + ret = -EADDRNOTAVAIL; + LOG_ERROR_BDF("invalid mac addr:%pM.(err:%d)\n", addr->sa_data, ret); + goto l_out; + } + + spin_lock_bh(&adapter->mbx_lock); + ret = sxevf_uc_addr_set(hw, addr->sa_data); + spin_unlock_bh(&adapter->mbx_lock); + + if (ret) { + LOG_ERROR_BDF("add vf mac addr:%pM to pf filter fail.(ret:%d)\n", + addr->sa_data, ret); + goto l_out; + } + + ether_addr_copy(adapter->mac_filter_ctxt.cur_uc_addr, addr->sa_data); + ether_addr_copy(adapter->mac_filter_ctxt.def_uc_addr, addr->sa_data); +#ifndef HAVE_ETH_HW_ADDR_SET_API + ether_addr_copy(netdev->dev_addr, addr->sa_data); +#else + eth_hw_addr_set(netdev, addr->sa_data); +#endif + + LOG_INFO_BDF("change vf cur and default mac addr to %pM done.\n", + addr->sa_data); + +l_out: + return ret; +} + +void sxevf_set_rx_mode(struct net_device *netdev) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + struct sxevf_hw *hw = &adapter->hw; + enum sxevf_cast_mode mode; + + if (netdev->flags & IFF_PROMISC) { + mode = SXEVF_CAST_MODE_PROMISC; + } else if (netdev->flags & IFF_ALLMULTI) { + mode = SXEVF_CAST_MODE_ALLMULTI; + } else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST)) { + mode = SXEVF_CAST_MODE_MULTI; + } else { + mode = SXEVF_CAST_MODE_NONE; + } + + spin_lock_bh(&adapter->mbx_lock); + + sxevf_cast_mode_set(hw, mode); + sxevf_mc_addr_sync(hw, netdev); + sxevf_uc_addr_sync(hw, netdev); + + spin_unlock_bh(&adapter->mbx_lock); + + return; +} + +void sxevf_sw_mtu_set(struct sxevf_adapter *adapter, u32 new_mtu) +{ + LOG_INFO_BDF("set sw mtu from %u to %u vf netdev mtu:%u\n", + adapter->sw_mtu, + new_mtu, + adapter->netdev->mtu); + + adapter->sw_mtu = new_mtu; + + return; +} + +u32 sxevf_sw_mtu_get(struct sxevf_adapter *adapter) +{ + u32 max_frame; + + if (adapter->sw_mtu == 0) { + 
max_frame = adapter->netdev->mtu + SXEVF_ETH_DEAD_LOAD; + } else { + max_frame = adapter->sw_mtu; + } + + LOG_DEBUG_BDF("sw mtu:%u vf netdev mtu:%u result:%u\n", + adapter->sw_mtu, + adapter->netdev->mtu, + max_frame); + + return max_frame; +} + +STATIC s32 sxevf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + u32 frame = new_mtu; + s32 ret; + + if (adapter->xdp_prog) { + LOG_DEV_WARN("%s %s xdp progarm can't change mtu.\n", + netdev_name(adapter->netdev), + dev_name(&adapter->pdev->dev)); + ret = -EPERM; + goto l_end; + } + + ret = sxevf_rx_max_frame_configure(adapter, frame); + if (ret) { + ret = -EINVAL; + LOG_ERROR_BDF("set max frame:%u fail.(err:%d)\n", frame, ret); + goto l_end; + } + + LOG_DEV_DEBUG("%s %s change mtu from %u to %u.\n", + netdev_name(adapter->netdev), + dev_name(&adapter->pdev->dev), + netdev->mtu, new_mtu); + + netdev->mtu = new_mtu; + if (netif_running(netdev)) { + LOG_INFO_BDF("change mtu to:%u, next to reinit.\n", new_mtu); + sxevf_hw_reinit(adapter); + } + +l_end: + return ret; +} + +static void sxevf_dcb_configure(struct sxevf_adapter *adapter) +{ + s32 ret; + u16 rx_ring; + u16 tx_ring; + u8 tc_num, default_tc, max_tx_num; + struct sxevf_hw *hw = &adapter->hw; + + ret = sxevf_ring_info_get(adapter, &tc_num, &default_tc, &max_tx_num); + if (ret) { + LOG_ERROR_BDF("get pf ring cfg info fail, use default_tc ring num." + "(err:%d)\n", ret); + goto l_end; + } + + if (tc_num > 1) { + tx_ring = 1; + rx_ring = tc_num; + adapter->tx_ring_ctxt.ring[0]->reg_idx = default_tc; + + if ((rx_ring != adapter->rx_ring_ctxt.num) || + (tx_ring != adapter->tx_ring_ctxt.num)) { + hw->mbx.interval = 0; + set_bit(SXEVF_RING_REASSIGN_REQUESTED, &adapter->monitor_ctxt.state); + } + } + +l_end: + return; +} + +static s32 sxevf_get_link_enable(struct sxevf_adapter *adapter) +{ + s32 ret = 0; + struct sxevf_hw *hw = &adapter->hw; + struct sxevf_link_enable_msg msg = {}; + + bool enable = adapter->link.link_enable; + msg.msg_type = SXEVF_LINK_ENABLE_GET; + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&msg, + SXEVF_MSG_NUM(sizeof(msg))); + if(!ret && msg.msg_type == (SXEVF_LINK_ENABLE_GET | SXEVF_MSGTYPE_ACK)) { + adapter->link.link_enable = msg.link_enable; + if (enable && enable != adapter->link.link_enable) + LOG_MSG_INFO(drv, "VF is administratively disabled\n"); + } + LOG_INFO_BDF("vf link enable: %d\n", adapter->link.link_enable); + + return ret; +} + +int sxevf_open(struct net_device *netdev) +{ + int ret; + struct sxevf_adapter *adapter = netdev_priv(netdev); + struct sxevf_hw *hw = &adapter->hw; + + if (test_bit(SXEVF_TESTING, &adapter->state)) { + ret = -EBUSY; + goto l_end; + } + + if (!adapter->irq_ctxt.ring_irq_num) { + LOG_ERROR_BDF("ring irq num zero.sxevf open fail.\n"); + ret = -ENOMEM; + goto l_end; + } + + if (test_bit(SXEVF_HW_STOP, &hw->state)) { + sxevf_reset(adapter); + + if (test_bit(SXEVF_HW_STOP, &hw->state)) { + ret = -SXEVF_ERR_RESET_FAILED; + LOG_DEV_ERR("open process reset vf still fail.(err:%d)\n", ret); + goto l_reset; + } + } + + netif_carrier_off(netdev); + + ret = sxevf_tx_configure(adapter); + if (ret) { + LOG_ERROR_BDF("tx config failed, ret=%d\n", ret); + goto l_reset; + } + + ret = sxevf_rx_configure(adapter); + if (ret) { + LOG_ERROR_BDF("rx config failed, reset and wait for next insmod\n"); + goto l_free_tx; + } + +#ifdef SXE_IPSEC_CONFIGURE + sxevf_ipsec_restore(adapter); +#endif + + sxevf_get_link_enable(adapter); + + ret = sxevf_irq_configure(adapter); + if (ret) { + LOG_ERROR_BDF("irq 
config failed, ret=%d\n", ret); + goto l_irq_err; + + } + + sxevf_dcb_configure(adapter); + + sxevf_save_reset_stats(adapter); + sxevf_last_counter_stats_init(adapter); + + netif_tx_start_all_queues(netdev); + + sxevf_task_timer_trigger(adapter); + + sxevf_dev_mac_addr_set(adapter); + + LOG_INFO_BDF("vf open success\n"); + + return 0; + +l_irq_err: + sxevf_rx_release(adapter); +l_free_tx: + sxevf_tx_release(adapter); + sxevf_irq_ctxt_exit(adapter); +l_reset: + sxevf_reset(adapter); +l_end: + return ret; +} + +static void sxevf_netif_stop(struct net_device *netdev) +{ + netif_tx_stop_all_queues(netdev); + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + return; +} + +static void sxevf_hw_disable(struct sxevf_adapter *adapter) +{ + sxevf_hw_rx_disable(adapter); + + usleep_range(SXEVF_HW_DISABLE_SLEEP_TIME_MIN, + SXEVF_HW_DISABLE_SLEEP_TIME_MAX); + + sxevf_hw_irq_disable(adapter); + + sxevf_hw_tx_disable(adapter); + + return; +} + +static void sxevf_txrx_stop(struct sxevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + sxevf_netif_stop(netdev); + + sxevf_hw_disable(adapter); + + sxevf_napi_disable(adapter); + + return ; +} + +static void sxevf_txrx_ring_clean(struct sxevf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + sxevf_tx_ring_buffer_clean(adapter->tx_ring_ctxt.ring[i]); + } + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + sxevf_tx_ring_buffer_clean(adapter->xdp_ring_ctxt.ring[i]); + } + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + sxevf_rx_ring_buffer_clean(adapter->rx_ring_ctxt.ring[i]); + } + + return ; +} + +void sxevf_down(struct sxevf_adapter *adapter) +{ + if (test_and_set_bit(SXEVF_DOWN, &adapter->state)) { + goto l_end; + } + + sxevf_txrx_stop(adapter); + + del_timer_sync(&adapter->monitor_ctxt.timer); + + if (!pci_channel_offline(adapter->pdev)) { + sxevf_reset(adapter); + } + + sxevf_txrx_ring_clean(adapter); + + LOG_INFO_BDF("sxevf down done\n"); + +l_end: + return; +} + +void sxevf_up(struct sxevf_adapter *adapter) +{ + sxevf_dcb_configure(adapter); + +#ifdef SXE_IPSEC_CONFIGURE + sxevf_ipsec_restore(adapter); +#endif + + sxevf_hw_tx_configure(adapter); + + sxevf_hw_rx_configure(adapter); + + sxevf_dev_mac_addr_set(adapter); + + sxevf_get_link_enable(adapter); + + sxevf_hw_irq_configure(adapter); + + sxevf_save_reset_stats(adapter); + sxevf_last_counter_stats_init(adapter); + + netif_tx_start_all_queues(adapter->netdev); + + sxevf_task_timer_trigger(adapter); + + LOG_INFO_BDF("up finish\n"); + return ; +} + +void sxevf_hw_reinit(struct sxevf_adapter *adapter) +{ + WARN_ON(in_interrupt()); + + while (test_and_set_bit(SXEVF_RESETTING, &adapter->state)) { + msleep(1); + } + + sxevf_down(adapter); + pci_set_master(adapter->pdev); + sxevf_up(adapter); + clear_bit(SXEVF_RESETTING, &adapter->state); + + LOG_INFO_BDF("reinit finish\n"); + return; +} + +static void sxevf_resource_release(struct sxevf_adapter *adapter) +{ + sxevf_irq_release(adapter); + + sxevf_rx_release(adapter); + + sxevf_tx_release(adapter); + + return; +} + +void sxevf_terminate(struct sxevf_adapter *adapter) +{ + sxevf_down(adapter); + + sxevf_resource_release(adapter); +} + +int sxevf_close(struct net_device *netdev) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + + if (netif_device_present(netdev)) { + sxevf_terminate(adapter); + } + + LOG_INFO_BDF("close finish\n"); + return 0; +} + +int sxevf_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct sxevf_adapter *adapter = 
netdev_priv(netdev); + struct sxevf_hw *hw = &adapter->hw; + int ret; + + LOG_DEBUG_BDF("add vlan[%u] in vfta\n", vid); + + spin_lock_bh(&adapter->mbx_lock); + ret = sxe_vf_filter_array_vid_update(hw, vid, true); + spin_unlock_bh(&adapter->mbx_lock); + + if (ret == -SXEVF_ERR_MSG_HANDLE_ERR) { + LOG_ERROR_BDF("pf vf mailbox msg handle error\n"); + ret = -EACCES; + goto l_ret; + } + + if (ret != -SXEVF_ERR_MSG_HANDLE_ERR && ret != 0) { + LOG_ERROR_BDF("pf response error ret = %d\n", ret); + ret = -EIO; + goto l_ret; + } + + set_bit(vid, adapter->active_vlans); + +l_ret: + return ret; +} + +int sxevf_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + struct sxevf_hw *hw = &adapter->hw; + int ret; + + spin_lock_bh(&adapter->mbx_lock); + + LOG_DEBUG_BDF("delete vlan[%u] in vfta\n", vid); + ret = sxe_vf_filter_array_vid_update(hw, vid, false); + + if (ret == -SXEVF_ERR_MSG_HANDLE_ERR) { + LOG_ERROR_BDF("pf vf mailbox msg handle error\n"); + ret = -EACCES; + } else if (ret != -SXEVF_ERR_MSG_HANDLE_ERR && ret != 0) { + LOG_ERROR_BDF("pf response error ret = %d\n", ret); + ret = -EIO; + } + + spin_unlock_bh(&adapter->mbx_lock); + + clear_bit(vid, adapter->active_vlans); + + return ret; +} + +static netdev_features_t sxevf_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + u32 network_hdr_len, mac_hdr_len; + struct sxevf_adapter *adapter = netdev_priv(dev); + + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > SXEVF_MAX_MAC_HDR_LEN)) { + LOG_DEBUG_BDF("mac_hdr_len=%u > %u\n", + mac_hdr_len, SXEVF_MAX_MAC_HDR_LEN); + SKB_DUMP(skb); + features &= ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + goto l_ret; + } + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > SXEVF_MAX_NETWORK_HDR_LEN)) { + LOG_DEBUG_BDF("network_hdr_len=%u > %u\n", + network_hdr_len, SXEVF_MAX_NETWORK_HDR_LEN); + SKB_DUMP(skb); + features &= ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + goto l_ret; + } + + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { + features &= ~NETIF_F_TSO; + } + +l_ret: + return features; +} + +void sxevf_update_stats(struct sxevf_adapter *adapter) +{ + u32 i; + struct sxevf_hw *hw = &adapter->hw; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 alloc_rx_page = 0, hw_csum_rx_error = 0; + struct sxevf_ring **rx_ring = adapter->rx_ring_ctxt.ring; + + if (test_bit(SXEVF_DOWN, &adapter->state) || + test_bit(SXEVF_RESETTING, &adapter->state)) { + LOG_WARN_BDF("adapter state:0x%lx.\n", + adapter->state); + goto l_end; + } + + hw->stat.ops->packet_stats_get(hw, &adapter->stats.hw); + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + alloc_rx_page += rx_ring[i]->rx_stats.alloc_rx_page; + alloc_rx_page_failed += rx_ring[i]->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring[i]->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring[i]->rx_stats.csum_err; + } + + adapter->stats.sw.alloc_rx_page = alloc_rx_page; + adapter->stats.sw.alloc_rx_page_failed = alloc_rx_page_failed; + adapter->stats.sw.alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->stats.sw.hw_csum_rx_error = hw_csum_rx_error; + +l_end: + return; +} + +static void sxevf_ring_stats64_get(struct rtnl_link_stats64 *stats, + struct sxevf_ring *ring, + bool is_rx) +{ + u32 start; + u64 bytes, packets; + + if 
(ring) { + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + if (true == is_rx) { + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } else { + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + + return; +} + +#ifdef NO_VOID_NDO_GET_STATS64 +static struct rtnl_link_stats64 * +sxevf_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +#else +static void sxevf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif +{ + u32 i; + struct sxevf_ring *ring; + struct sxevf_adapter *adapter = netdev_priv(netdev); + + sxevf_update_stats(adapter); + + stats->multicast = adapter->stats.hw.vfmprc - adapter->stats.hw.base_vfmprc; + stats->multicast += adapter->stats.hw.saved_reset_vfmprc; + + rcu_read_lock(); + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + ring = adapter->rx_ring_ctxt.ring[i]; + sxevf_ring_stats64_get(stats, ring, true); + } + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + ring = adapter->tx_ring_ctxt.ring[i]; + sxevf_ring_stats64_get(stats, ring, false); + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + ring = adapter->xdp_ring_ctxt.ring[i]; + sxevf_ring_stats64_get(stats, ring, false); + } + + rcu_read_unlock(); + +#ifdef NO_VOID_NDO_GET_STATS64 + return stats; +#endif +} + +static const struct net_device_ops sxevf_netdev_ops = { + .ndo_open = sxevf_open, + .ndo_stop = sxevf_close, + .ndo_set_mac_address = sxevf_mac_addr_set, + .ndo_start_xmit = sxevf_xmit, + .ndo_set_rx_mode = sxevf_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, +#ifdef HAVE_NET_DEVICE_EXTENDED + .ndo_size = sizeof(struct net_device_ops), + .extended.ndo_change_mtu = sxevf_change_mtu, +#else + .ndo_change_mtu = sxevf_change_mtu, +#endif + .ndo_vlan_rx_add_vid = sxevf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = sxevf_vlan_rx_kill_vid, + .ndo_tx_timeout = sxevf_tx_timeout, + .ndo_features_check = sxevf_features_check, + + .ndo_get_stats64 = sxevf_get_stats64, +#ifdef HAVE_XDP_SUPPORT + .ndo_bpf = sxevf_xdp, +#endif +}; + +static void sxevf_netdev_ops_init(struct net_device *netdev) +{ + netdev->netdev_ops = &sxevf_netdev_ops; + return; +} + +STATIC void sxevf_netdev_feature_init(struct net_device *netdev) +{ + netdev->features = NETIF_F_SG | + NETIF_F_SCTP_CRC | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM; + + netdev->gso_partial_features = SXEVF_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_GSO_PARTIAL| + SXEVF_GSO_PARTIAL_FEATURES; + +#ifdef SXE_IPSEC_CONFIGURE + netdev->features |= SXEVF_ESP_FEATURES; + netdev->hw_enc_features |= SXEVF_ESP_FEATURES; +#endif + + netdev->hw_features |= netdev->features; + + if (dma_get_mask(netdev->dev.parent) == DMA_BIT_MASK(SXEVF_DMA_BIT_WIDTH_64)) { + netdev->features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + + netdev->mpls_features |= NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_HW_CSUM; + netdev->mpls_features |= SXEVF_GSO_PARTIAL_FEATURES; + + netdev->hw_enc_features |= netdev->vlan_features; + + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + return; +} + +static void sxevf_netdev_name_init(struct net_device *netdev, + struct pci_dev *pdev) +{ + strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + + return; +} + +#ifndef NO_NETDEVICE_MIN_MAX_MTU +static void 
sxevf_netdev_mtu_init(struct sxevf_adapter *adapter) +{ + u32 max_mtu; + struct net_device *netdev = adapter->netdev; + + if (adapter->hw.board_type == SXE_BOARD_VF_HV) { + max_mtu = ETH_DATA_LEN; + } else { + max_mtu = SXEVF_JUMBO_FRAME_SIZE_MAX - SXEVF_ETH_DEAD_LOAD; + } +#ifdef HAVE_NET_DEVICE_EXTENDED + netdev->extended->min_mtu = ETH_MIN_MTU; + netdev->extended->max_mtu = max_mtu; + + LOG_INFO("max_mtu:%u min_mtu:%u.\n", + netdev->extended->max_mtu, netdev->extended->min_mtu); +#else + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = max_mtu; + + LOG_INFO("max_mtu:%u min_mtu:%u.\n", netdev->max_mtu, netdev->min_mtu); +#endif + return; +} +#endif + +static void sxevf_netdev_priv_flags_init(struct net_device *netdev) +{ + netdev->priv_flags |= IFF_UNICAST_FLT; + + return; +} + +void sxevf_netdev_init(struct sxevf_adapter *adapter, struct pci_dev *pdev) +{ + struct net_device *netdev = adapter->netdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + sxevf_netdev_ops_init(netdev); + + sxevf_netdev_name_init(netdev, pdev); + + sxevf_netdev_feature_init(netdev); + + sxevf_netdev_priv_flags_init(netdev); + +#ifndef NO_NETDEVICE_MIN_MAX_MTU + sxevf_netdev_mtu_init(adapter); +#endif + + sxevf_ethtool_ops_set(netdev); + + return ; +} diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_netdev.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_netdev.h new file mode 100644 index 000000000000..2817cc452b6f --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_netdev.h @@ -0,0 +1,44 @@ +#ifndef __SXEVF_NETDEV_H__ +#define __SXEVF_NETDEV_H__ + +#include +#include "sxevf.h" + +#define SXEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +int sxevf_open(struct net_device *netdev); + +void sxevf_netdev_init(struct sxevf_adapter *adapter, struct pci_dev *pdev); + +void sxevf_reset(struct sxevf_adapter *adapter); + +void sxevf_terminate(struct sxevf_adapter *adapter); + +void sxevf_down(struct sxevf_adapter *adapter); + +int sxevf_close(struct net_device *netdev); + +void sxevf_up(struct sxevf_adapter *adapter); + +void sxevf_hw_reinit(struct sxevf_adapter *adapter); + +void sxevf_set_rx_mode(struct net_device *netdev); + +int sxevf_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); + +int sxevf_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); + +void sxevf_update_stats(struct sxevf_adapter *adapter); + +u32 sxevf_sw_mtu_get(struct sxevf_adapter *adapter); + +void sxevf_sw_mtu_set(struct sxevf_adapter *adapter, u32 new_mtu); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_pci.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_pci.h new file mode 100644 index 000000000000..f75d77e41aaa --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_pci.h @@ -0,0 +1,14 @@ +#ifndef _SXEVF_PCI_H_ +#define _SXEVF_PCI_H_ + +#define PCI_VENDOR_ID_STARS 0x1FF2 +#define SXEVF_DEV_ID_ASIC 0x10a2 +#define SXEVF_DEV_ID_ASIC_HV 0x10a3 + +#define SXEVF_DMA_BIT_WIDTH_64 64 +#define SXEVF_DMA_BIT_WIDTH_32 32 + +#define SXEVF_READ_CFG_WORD_FAILED 0xFFFFU + +#endif + diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_regs.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_regs.h new file mode 100644 index 000000000000..6ab6d98dfc1e --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_regs.h @@ -0,0 +1,116 @@ + +#ifndef __SXEVF_REGS_H__ +#define __SXEVF_REGS_H__ + +#define 
SXEVF_REG_READ_FAIL 0xffffffffU +#define SXEVF_REG_READ_RETRY 5 + +#define SXE_VFLINKS_UP 0x00000008 +#define SXE_VFLINKS_SPEED 0x00000006 +#define SXE_VFLINKS_SPEED_10G 0x00000006 +#define SXE_VFLINKS_SPEED_1G 0x00000004 +#define SXE_VFLINKS_SPEED_100 0x00000002 + +#define SXE_VFCTRL 0x00000 +#define SXE_VFSTATUS 0x00008 +#define SXE_VFLINKS 0x00018 +#define SXE_VFFRTIMER 0x00048 +#define SXE_VFRXMEMWRAP 0x03190 +#define SXE_VFEICR 0x00100 +#define SXE_VFEICS 0x00104 +#define SXE_VFEIMS 0x00108 +#define SXE_VFEIMC 0x0010C +#define SXE_VFEIAM 0x00114 +#define SXE_VFEITR(x) (0x00820 + (4 * (x))) +#define SXE_VFIVAR(x) (0x00120 + (4 * (x))) +#define SXE_VFIVAR_MISC 0x00140 +#define SXE_VFRDBAL(x) (0x01000 + (0x40 * (x))) +#define SXE_VFRDBAH(x) (0x01004 + (0x40 * (x))) +#define SXE_VFRDLEN(x) (0x01008 + (0x40 * (x))) +#define SXE_VFRDH(x) (0x01010 + (0x40 * (x))) +#define SXE_VFRDT(x) (0x01018 + (0x40 * (x))) +#define SXE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) +#define SXE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) +#define SXE_VFLROCTL(x) (0x0102C + (0x40 * (x))) +#define SXE_VFPSRTYPE 0x00300 +#define SXE_VFTDBAL(x) (0x02000 + (0x40 * (x))) +#define SXE_VFTDBAH(x) (0x02004 + (0x40 * (x))) +#define SXE_VFTDLEN(x) (0x02008 + (0x40 * (x))) +#define SXE_VFTDH(x) (0x02010 + (0x40 * (x))) +#define SXE_VFTDT(x) (0x02018 + (0x40 * (x))) +#define SXE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) +#define SXE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) +#define SXE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) +#define SXE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) +#define SXE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) +#define SXE_VFGPRC 0x0101C +#define SXE_VFGPTC 0x0201C +#define SXE_VFGORC_LSB 0x01020 +#define SXE_VFGORC_MSB 0x01024 +#define SXE_VFGOTC_LSB 0x02020 +#define SXE_VFGOTC_MSB 0x02024 +#define SXE_VFMPRC 0x01034 +#define SXE_VFMRQC 0x3000 +#define SXE_VFRSSRK(x) (0x3100 + ((x) * 4)) +#define SXE_VFRETA(x) (0x3200 + ((x) * 4)) + +#define SXEVF_VFEIMC_IRQ_MASK (7) +#define SXEVF_IVAR_ALLOC_VALID (0x80) + +#define SXEVF_EITR_CNT_WDIS (0x80000000) +#define SXEVF_EITR_ITR_MASK (0x00000FF8) +#define SXEVF_EITR_ITR_SHIFT (2) +#define SXEVF_EITR_ITR_MAX (SXEVF_EITR_ITR_MASK >> SXEVF_EITR_ITR_SHIFT) + +#define SXE_VFRXDCTL_ENABLE 0x02000000 +#define SXE_VFTXDCTL_ENABLE 0x02000000 +#define SXE_VFCTRL_RST 0x04000000 + +#define SXEVF_RXDCTL_ENABLE 0x02000000 +#define SXEVF_RXDCTL_VME 0x40000000 + +#define SXEVF_PSRTYPE_RQPL_SHIFT 29 + +#define SXEVF_SRRCTL_DROP_EN 0x10000000 +#define SXEVF_SRRCTL_DESCTYPE_DATA_ONEBUF 0x02000000 +#define SXEVF_SRRCTL_BSIZEPKT_SHIFT (10) +#define SXEVF_SRRCTL_BSIZEHDRSIZE_SHIFT (2) +#define SXEVF_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define SXEVF_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +#define SXE_VFMAILBOX 0x002FC +#define SXE_VFMBMEM 0x00200 + +#define SXE_VFMAILBOX_REQ 0x00000001 +#define SXE_VFMAILBOX_ACK 0x00000002 +#define SXE_VFMAILBOX_VFU 0x00000004 +#define SXE_VFMAILBOX_PFU 0x00000008 +#define SXE_VFMAILBOX_PFSTS 0x00000010 +#define SXE_VFMAILBOX_PFACK 0x00000020 +#define SXE_VFMAILBOX_RSTI 0x00000040 +#define SXE_VFMAILBOX_RSTD 0x00000080 +#define SXE_VFMAILBOX_RC_BIT 0x000000B0 + +#define SXEVF_TDBAL(_i) (0x02000 + ((_i) * 0x40)) +#define SXEVF_TDBAH(_i) (0x02004 + ((_i) * 0x40)) +#define SXEVF_TDLEN(_i) (0x02008 + ((_i) * 0x40)) +#define SXEVF_TDH(_i) (0x02010 + ((_i) * 0x40)) +#define SXEVF_TDT(_i) (0x02018 + ((_i) * 0x40)) +#define SXEVF_TXDCTL(_i) (0x02028 + ((_i) * 0x40)) +#define SXEVF_TDWBAL(_i) (0x02038 + ((_i) * 0x40)) +#define SXEVF_TDWBAH(_i) (0x0203C + ((_i) * 0x40)) + +#define 
SXEVF_TXDCTL_SWFLSH (0x02000000) +#define SXEVF_TXDCTL_ENABLE (0x02000000) + +#define SXEVF_VFGPRC 0x0101C +#define SXEVF_VFGPTC 0x0201C +#define SXEVF_VFGORC_LSB 0x01020 +#define SXEVF_VFGORC_MSB 0x01024 +#define SXEVF_VFGOTC_LSB 0x02020 +#define SXEVF_VFGOTC_MSB 0x02024 +#define SXEVF_VFMPRC 0x01034 + +#define SXEVF_EICR_MASK 0x07 + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ring.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ring.c new file mode 100644 index 000000000000..1b525de4a169 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ring.c @@ -0,0 +1,232 @@ +#include +#include + +#include "sxevf.h" +#include "sxe_log.h" +#include "sxevf_msg.h" + +void sxevf_ring_feature_init(struct sxevf_adapter *adapter) +{ + u16 rss; + + rss = min_t(u16, SXEVF_RSS_RING_NUM_MAX, num_online_cpus()); + adapter->ring_f.rss_limit = rss; + + adapter->tx_ring_ctxt.depth = SXEVF_TX_DEFAULT_DESC_CNT; + adapter->rx_ring_ctxt.depth = SXEVF_RX_DEFAULT_DESC_CNT; + + adapter->xdp_ring_ctxt.depth = 0; + LOG_INFO_BDF("rss_limit:%u descriptor cnt tx:%u rx:%u xdp:%u.\n", + adapter->ring_f.rss_limit, + adapter->tx_ring_ctxt.depth, + adapter->rx_ring_ctxt.depth, + adapter->xdp_ring_ctxt.depth); + + return; +} + +s32 sxevf_ring_info_get(struct sxevf_adapter *adapter, + u8 *tc_num, u8 *default_tc, u8 *max_tx_num) +{ + struct sxevf_hw *hw = &adapter->hw; + struct sxevf_ring_info_msg req = {}; + s32 ret; + + req.msg_type = SXEVF_RING_INFO_GET; + spin_lock_bh(&adapter->mbx_lock); + ret = sxevf_send_and_rcv_msg(hw, (u32 *)&req, + SXEVF_MSG_NUM(sizeof(req))); + spin_unlock_bh(&adapter->mbx_lock); + + if (ret) { + LOG_ERROR_BDF("msg:0x%x send or rcv reply failed.(err:%d)\n", + req.msg_type, ret); + goto l_out; + } + + if (req.msg_type != (SXEVF_MSGTYPE_ACK | SXEVF_RING_INFO_GET)) { + ret = -SXEVF_ERR_REPLY_INVALID; + LOG_WARN_BDF("msg:0x%x not expected.(err:%d)\n", req.msg_type, ret); + goto l_out; + } + + if ((req.max_tx_num == 0) || + (req.max_tx_num > SXEVF_TXRX_RING_NUM_MAX)) { + req.max_tx_num = SXEVF_TXRX_RING_NUM_MAX; + } + *max_tx_num = req.max_tx_num; + + if ((req.max_rx_num == 0) || + (req.max_rx_num > SXEVF_TXRX_RING_NUM_MAX)) { + req.max_rx_num = SXEVF_TXRX_RING_NUM_MAX; + } + + if (req.tc_num > req.max_rx_num) { + req.tc_num = SXEVF_DEFAULT_TC_NUM; + } + *tc_num = req.tc_num; + + if (req.default_tc > req.max_tx_num) { + req.default_tc = 0; + } + *default_tc = req.default_tc; + + LOG_INFO_BDF("ring info max_tx_num:%u max_rx_num:%u " + "tc_num:%u default_tc:%u.\n", + req.max_tx_num, req.max_rx_num, req.tc_num, req.default_tc); + +l_out: + return ret; +} + +void sxevf_ring_num_set(struct sxevf_adapter *adapter) +{ + u8 tc_num; + u8 default_tc; + u8 max_tx_num; + s32 ret; + + adapter->rx_ring_ctxt.num = SXEVF_TXRX_RING_NUM_DEFAULT; + adapter->tx_ring_ctxt.num = SXEVF_TXRX_RING_NUM_DEFAULT; + adapter->xdp_ring_ctxt.num = 0; + + ret = sxevf_ring_info_get(adapter, &tc_num, &default_tc, &max_tx_num); + if (ret) { + LOG_ERROR_BDF("get pf ring cfg info fail, use default_tc ring num." + "(err:%d)\n", ret); + goto l_out; + } + + if (tc_num > 1) { + adapter->cap |= SXEVF_DCB_ENABLE; + adapter->ring_f.tc_per_pool = tc_num; + adapter->rx_ring_ctxt.num = tc_num; + } else { + u16 rss = adapter->ring_f.rss_limit; + + switch(adapter->mbx_version) { + case SXEVF_MBX_API_11: + case SXEVF_MBX_API_12: + case SXEVF_MBX_API_13: + case SXEVF_MBX_API_14: + if (adapter->xdp_prog && (max_tx_num == rss)) { + rss = rss > 3 ? 
2 : 1; + } + + adapter->tx_ring_ctxt.num = rss; + adapter->rx_ring_ctxt.num = rss; + adapter->xdp_ring_ctxt.num = adapter->xdp_prog ? rss : 0; + default: + break; + } + } + + LOG_INFO_BDF("cap = 0x%x mbx version:%u rss:%u rx_ring_num:%d " + "tx_ring_num:%d xdp_ring_num:%d tc_num:%u\n", + adapter->cap, + adapter->mbx_version, + adapter->ring_f.rss_limit, + adapter->rx_ring_ctxt.num, + adapter->tx_ring_ctxt.num, + adapter->xdp_ring_ctxt.num, + tc_num); +l_out: + return; +} + + +static void sxevf_add_ring(struct sxevf_ring *ring, + struct sxevf_list *head) +{ + ring->next = head->next; + head->next = ring; + head->cnt++; + + return; +} + +void sxevf_tx_ring_init(struct sxevf_adapter *adapter, u16 base, + u16 txr_cnt, u16 ring_idx, u16 irq_idx, u16 reg_idx) +{ + struct sxevf_irq_data *irq_data = adapter->irq_ctxt.irq_data[irq_idx]; + struct sxevf_ring *ring = &(irq_data->ring[base]); + u16 txr_idx = ring_idx; + + while(txr_cnt) { + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->irq_data = irq_data; + + sxevf_add_ring(ring, &irq_data->tx.list); + irq_data->tx.irq_rate.next_update = jiffies + 1; + + ring->depth = adapter->tx_ring_ctxt.depth; + ring->idx = txr_idx; + ring->reg_idx = reg_idx; + + adapter->tx_ring_ctxt.ring[txr_idx] = ring; + txr_cnt--; + txr_idx++; + ring++; + } + + return; +} + +void sxevf_xdp_ring_init(struct sxevf_adapter *adapter, u16 base, + u16 xdp_cnt, u16 ring_idx, u16 irq_idx, u16 reg_idx) +{ + struct sxevf_irq_data *irq_data = adapter->irq_ctxt.irq_data[irq_idx]; + struct sxevf_ring *ring = &(irq_data->ring[base]); + u16 xdp_idx = ring_idx; + + while(xdp_cnt) { + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->irq_data = irq_data; + + irq_data->tx.xdp_ring = ring; + + ring->depth = adapter->tx_ring_ctxt.depth; + ring->idx = xdp_idx; + ring->reg_idx = reg_idx; + vf_set_ring_xdp(ring); + + adapter->xdp_ring_ctxt.ring[xdp_idx] = ring; + + xdp_cnt--; + xdp_idx++; + ring++; + } + + return; +} + +void sxevf_rx_ring_init(struct sxevf_adapter *adapter, u16 base, + u16 rxr_cnt, u16 ring_idx, u16 irq_idx, u16 reg_idx) +{ + struct sxevf_irq_data *irq_data = adapter->irq_ctxt.irq_data[irq_idx]; + struct sxevf_ring *ring = &(irq_data->ring[base]); + u16 rxr_idx = ring_idx; + + while(rxr_cnt) { + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + ring->irq_data = irq_data; + + sxevf_add_ring(ring, &irq_data->rx.list); + irq_data->rx.irq_rate.next_update = jiffies + 1; + + ring->depth = adapter->rx_ring_ctxt.depth; + ring->idx = rxr_idx; + ring->reg_idx = rxr_idx; + + adapter->rx_ring_ctxt.ring[rxr_idx] = ring; + rxr_cnt--; + rxr_idx++; + ring++; + } + + return; +} + diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ring.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ring.h new file mode 100644 index 000000000000..0f050f1d6940 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_ring.h @@ -0,0 +1,301 @@ +#ifndef __SXEVF_RING_H__ +#define __SXEVF_RING_H__ + +#include "sxe_compat.h" +#include +#ifndef HAVE_NO_XDP_BUFF_RXQ +#include +#endif + +struct sxevf_adapter; +struct sxevf_irq_data; + +#define SXEVF_LONG_32BITS 32 +#define SXEVF_PAGE_SIZE_4KB 4096 +#define SXEVF_PAGE_SIZE_8KB 8192 +#define SXEVF_PAGE_SIZE_64KB 65536 + +#define SXEVF_DESC_ALIGN_4K 4096 + +#define SXEVF_BUFFER_PER_4KPAGE 2 + +#define SXEVF_TXRX_RING_NUM_DEFAULT 1 + +#define SXEVF_DESC_CNT_MIN 64 +#define SXEVF_DESC_CNT_MAX 4096 +#define SXEVF_TX_DEFAULT_DESC_CNT 1024 +#define SXEVF_RX_DEFAULT_DESC_CNT 512 +#define 
SXEVF_REQ_DESCRIPTOR_MULTIPLE 8 +#define SXEVF_DEFAULT_TC_NUM 1 + +#define SXEVF_RSS_RING_NUM_MAX 2 + +enum sxevf_ring_state { + SXEVF_RX_3K_BUFFER, + SXEVF_RX_BUILD_SKB_ENABLED, + SXEVF_TX_XDP_RING, + SXEVF_TX_DETECT_HANG, + SXEVF_HANG_CHECK_ARMED, + SXEVF_TX_XDP_RING_PRIMED +}; + +#define vf_ring_is_xdp(ring) test_bit(SXEVF_TX_XDP_RING, &(ring)->state) +#define vf_set_ring_xdp(ring) set_bit(SXEVF_TX_XDP_RING, &(ring)->state) +#define vf_clear_ring_xdp(ring) clear_bit(SXEVF_TX_XDP_RING, &(ring)->state) + +#define sxevf_for_each_ring(post, head) \ + for (post = (head).next; post != NULL; post = post->next) + +#define SXEVF_TX_DESC(R, i) \ + (&(((union sxevf_tx_data_desc *)((R)->desc.base_addr))[i])) +#define SXEVF_TX_CTXTDESC(R, i) \ + (&(((struct sxevf_tx_context_desc *)((R)->desc.base_addr))[i])) + +#define SXEVF_RX_DESC(R, i) \ + (&(((union sxevf_rx_data_desc *)((R)->desc.base_addr))[i])) + +#define SXEVF_RXD_STAT_LB 0x40000 +#define SXEVF_RXD_STAT_L4CS 0x20 +#define SXEVF_RXD_STAT_IPCS 0x40 +#define SXEVF_RXD_STAT_VP 0x08 +#define SXEVF_RXD_STAT_EOP 0x02 + +#define SXEVF_TX_POPTS_IXSM 0x01 +#define SXEVF_TX_POPTS_TXSM 0x02 +#define SXEVF_TXD_POPTS_SHIFT 8 +#define SXEVF_TXD_POPTS_IXSM (SXEVF_TX_POPTS_IXSM << SXEVF_TXD_POPTS_SHIFT) +#define SXEVF_TXD_POPTS_TXSM (SXEVF_TX_POPTS_TXSM << SXEVF_TXD_POPTS_SHIFT) +#define SXEVF_TXD_IDX (1u << 4) +#define SXEVF_TXD_CC 0x00000080 + +#define SXEVF_TX_NON_DATA_DESC_NUM 3 +#define SXEVF_TX_DESC_PAYLEN_SHIFT 14 +#define SXEVF_DATA_PER_DESC_SIZE_SHIFT 14 +#define SXEVF_DATA_PER_DESC_SIZE_MAX (1u << SXEVF_DATA_PER_DESC_SIZE_SHIFT) +#define SXEVF_TX_DESC_USE_COUNT(S) DIV_ROUND_UP((S), SXEVF_DATA_PER_DESC_SIZE_MAX) + +#define SXEVF_TX_DESC_NEEDED (long int)(MAX_SKB_FRAGS + 4) +#define SXEVF_TX_WAKE_THRESHOLD (SXEVF_TX_DESC_NEEDED * 2) +#define SXEVF_TX_DESC_EOP_MASK 0x01000000 +#define SXEVF_TX_DESC_RS_MASK 0x08000000 +#define SXEVF_TX_DESC_STAT_DD 0x00000001 +#define SXEVF_TX_DESC_CMD (SXEVF_TX_DESC_EOP_MASK | SXEVF_TX_DESC_RS_MASK) +#define SXEVF_TX_DESC_TYPE_DATA 0x00300000 +#define SXEVF_TX_DESC_DEXT 0x20000000 +#define SXEVF_TX_DESC_IFCS 0x02000000 +#define SXEVF_TX_DESC_VLE 0x40000000 + +#define SXEVF_TXD_DCMD_VLE SXEVF_TX_DESC_VLE +#define SXEVF_TXD_DTYP_CTXT 0x00200000 +#define SXEVF_TXD_DCMD_TSE 0x80000000 + +#define SXEVF_RXD_STAT_SECP 0x20000 + +#define SXEVF_RXDADV_PKTTYPE_NONE 0x00000000 +#define SXEVF_RXDADV_PKTTYPE_IPV4 0x00000010 +#define SXEVF_RXDADV_PKTTYPE_IPV4_EX 0x00000020 +#define SXEVF_RXDADV_PKTTYPE_IPV6 0x00000040 +#define SXEVF_RXDADV_PKTTYPE_IPV6_EX 0x00000080 +#define SXEVF_RXDADV_PKTTYPE_TCP 0x00000100 +#define SXEVF_RXDADV_PKTTYPE_UDP 0x00000200 +#define SXEVF_RXDADV_PKTTYPE_SCTP 0x00000400 +#define SXEVF_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 +#define SXEVF_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 +#define SXEVF_RXDADV_PKTTYPE_LINKSEC 0x00004000 + +#define SXEVF_IP_HEAD_LEN_UNIT 4 + +#define SXEVF_RXDADV_ERR_L4E 0x40000000 +#define SXEVF_RXDADV_ERR_IPE 0x80000000 + +#define SXEVF_TX_CTXTD_MACLEN_SHIFT 9 +#define SXEVF_TX_CTXTD_VLAN_SHIFT 16 +#define SXEVF_TX_CTXTD_VLAN_MASK 0xffff0000 +#define SXEVF_TX_CTXTD_MACLEN_MASK 0x0000fE00 + +#define SXEVF_TX_HANG_PROC_ACTIVE(ring) \ + set_bit(SXEVF_TX_DETECT_HANG, &(ring)->state) +#define SXEVF_TX_HANG_CHECK_COMPLETE(ring) \ + clear_bit(SXEVF_TX_DETECT_HANG, &(ring)->state) +#define SXEVF_DETECT_TX_HANG_NEED(ring) \ + test_bit(SXEVF_TX_DETECT_HANG, &(ring)->state) + +struct sxevf_ring_stats { + u64 packets; + u64 bytes; +}; + +struct sxevf_tx_ring_stats { + u64 restart_queue; + u64 
tx_busy; + u64 tx_done_old; +}; + +struct sxevf_rx_ring_stats { + u64 alloc_rx_page; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 csum_err; +}; + +struct sxevf_ring_desc { + void *base_addr; + u8 __iomem *tail; + dma_addr_t dma; +}; +struct sxevf_ring { + struct sxevf_ring *next; + struct sxevf_irq_data *irq_data; + struct net_device *netdev; + struct device *dev; + struct sk_buff *skb; + + u8 idx; + u8 reg_idx; + u8 tc_idx; + + unsigned long state; + + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + + u16 depth; + u32 size; + struct sxevf_ring_desc desc; + + union { + struct sxevf_tx_buffer *tx_buffer_info; + struct sxevf_rx_buffer *rx_buffer_info; + }; + + struct bpf_prog *xdp_prog; +#ifndef HAVE_NO_XDP_BUFF_RXQ + struct xdp_rxq_info xdp_rxq; +#endif + + struct sxevf_ring_stats stats; + struct u64_stats_sync syncp; + union { + struct sxevf_tx_ring_stats tx_stats; + struct sxevf_rx_ring_stats rx_stats; + }; + +} ____cacheline_internodealigned_in_smp; + +struct sxevf_ring_feature { + u16 rss_limit; + union { + u16 tc_per_pool; + u16 ring_per_pool; + }; +} ____cacheline_internodealigned_in_smp; + +struct sxevf_ring_context { + u16 max_tx_num; + u16 max_rx_num; + u16 num; + u16 depth; + struct sxevf_ring *ring[SXEVF_TXRX_RING_NUM_MAX]; +}; + +struct sxevf_tx_buffer { + union sxevf_tx_data_desc *next_to_watch; + unsigned long time_stamp; + + union { + struct sk_buff *skb; + void *data; + }; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_features; +}; + +union sxevf_tx_data_desc { + struct { + __le64 buffer_addr; + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +struct sxevf_tx_context_desc { + __le32 vlan_macip_lens; + __le32 sa_idx; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +struct sxevf_rx_buffer { + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > SXEVF_LONG_32BITS) || (PAGE_SIZE >= SXEVF_PAGE_SIZE_64KB) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; +}; + +union sxevf_rx_data_desc { + struct { + __le64 pkt_addr; + __le64 hdr_addr; + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; + struct { + __le16 ip_id; + __le16 csum; + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; + __le16 length; + __le16 vlan; + } upper; + } wb; +}; + +static inline __le32 sxevf_status_err_check(union sxevf_rx_data_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +s32 sxevf_ring_info_get(struct sxevf_adapter *adapter, + u8 *tc_num, u8 *default_tc, u8 *max_tx_num); + +void sxevf_tx_ring_init(struct sxevf_adapter *adapter, u16 base, + u16 txr_cnt, u16 ring_idx, u16 irq_idx, u16 reg_idx); + +void sxevf_xdp_ring_init(struct sxevf_adapter *adapter, u16 base, + u16 xdp_cnt, u16 ring_idx, u16 irq_idx, u16 reg_idx); + +void sxevf_rx_ring_init(struct sxevf_adapter *adapter, u16 base, + u16 rxr_cnt, u16 ring_idx, u16 irq_idx, u16 reg_idx); + +void sxevf_ring_num_set(struct sxevf_adapter *adapter); + +void sxevf_ring_feature_init(struct sxevf_adapter *adapter); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_rx_proc.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_rx_proc.c new file mode 100644 index 000000000000..a51fe749fc27 --- /dev/null 
+++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_rx_proc.c @@ -0,0 +1,1182 @@ +#include +#include + +#include "sxevf.h" +#ifdef HAVE_NO_OVERFLOW_H +#include +#else +#include +#endif +#include "sxe_log.h" +#include "sxevf_hw.h" +#include "sxevf_rx_proc.h" +#include "sxevf_netdev.h" +#include "sxevf_pci.h" +#include "sxevf_msg.h" +#include "sxevf_csum.h" +#include "sxevf_ipsec.h" +#include "sxevf_debug.h" +#include "sxevf_xdp.h" + +#ifdef XDP_RXQ_INFO_REQ_API_NEED_3_PARAMS +static inline int xdp_rxq_info_reg_compat(struct xdp_rxq_info *xdp_rxq, + struct net_device *dev, + u32 queue_index, unsigned int __always_unused napi_id) +{ + return xdp_rxq_info_reg(xdp_rxq, dev, queue_index); +} + +#define xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id) \ + xdp_rxq_info_reg_compat(xdp_rxq, dev, queue_index, napi_id) +#endif + +static s32 sxevf_rx_ring_alloc(struct sxevf_ring *ring) +{ + s32 ret; + u32 size; + union sxevf_rx_data_desc *desc; + struct sxevf_adapter *adapter = netdev_priv(ring->netdev); + + size = sizeof(struct sxevf_rx_buffer) * ring->depth; + ring->rx_buffer_info = vmalloc(size); + if (!ring->rx_buffer_info) { + LOG_ERROR_BDF("ring[%u] unable to allocate memory for" + "the rx_buffer_info\n", ring->idx); + ret = -ENOMEM; + goto l_rx_buf_alloc_failed; + } + + memset(ring->rx_buffer_info, 0, + sizeof(struct sxevf_rx_buffer) * ring->depth); + + u64_stats_init(&ring->syncp); + + ring->size = ring->depth * sizeof(union sxevf_rx_data_desc); + ring->size = ALIGN(ring->size, SXEVF_DESC_ALIGN_4K); + + ring->desc.base_addr = dma_alloc_coherent(ring->dev, ring->size, + &ring->desc.dma, GFP_KERNEL); + if (!ring->desc.base_addr) { + LOG_ERROR_BDF("ring[%u] unable to allocate memory for the descriptor\n", + ring->idx); + ret = -ENOMEM; + goto l_base_addr_alloc_failed; + } + + desc = SXEVF_RX_DESC(ring, 0); + desc->wb.upper.length = 0; + + ring->next_to_clean = 0; + ring->next_to_use = 0; + +#ifndef HAVE_NO_XDP_BUFF_RXQ + if (xdp_rxq_info_reg(&ring->xdp_rxq, adapter->netdev, ring->idx, 0) < 0) { + LOG_ERROR_BDF("ring[%u] xdp rxq info reg failed\n",ring->idx); + goto l_xdp_rxq_reg_failed; + } +#endif + + ring->xdp_prog = adapter->xdp_prog; + + return 0; + +#ifndef HAVE_NO_XDP_BUFF_RXQ +l_xdp_rxq_reg_failed: + dma_free_coherent(ring->dev, ring->size, + ring->desc.base_addr, ring->desc.dma); + ring->desc.base_addr = NULL; +#endif + +l_base_addr_alloc_failed: + vfree(ring->rx_buffer_info); + ring->rx_buffer_info = NULL; + +l_rx_buf_alloc_failed: + LOG_DEV_ERR("unable to allocate memory for the Rx descriptor ring\n"); + return ret; +} + +void sxevf_rx_ring_buffer_clean(struct sxevf_ring *ring) +{ + u16 ntc = ring->next_to_clean; + struct sxevf_rx_buffer *rx_buffer = &ring->rx_buffer_info[ntc]; +#ifdef HAVE_DMA_ATTRS_STRUCT + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + LOG_DEBUG("clean ring[%u] from ntc=%u, next_to_alloc=%u, rx_buffer[%p]\n", + ring->idx, ntc, ring->next_to_alloc, rx_buffer); + + if (ring->skb) { + dev_kfree_skb(ring->skb); + ring->skb = NULL; + } + + while (ntc != ring->next_to_alloc) { + + dma_sync_single_range_for_cpu(ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + sxevf_rx_bufsz(ring), + DMA_FROM_DEVICE); + + dma_unmap_page_attrs(ring->dev, rx_buffer->dma, + sxevf_rx_pg_size(ring), + DMA_FROM_DEVICE, +#ifdef HAVE_DMA_ATTRS_STRUCT + &attrs); +#else + SXEVF_RX_DMA_ATTR); +#endif + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + + ntc++; + rx_buffer++; + if (ntc 
== ring->depth) { + ntc = 0; + rx_buffer = ring->rx_buffer_info; + } + } + + ring->next_to_alloc = 0; + ring->next_to_clean = 0; + ring->next_to_use = 0; + + return; +} + +static void sxevf_rx_ring_free(struct sxevf_ring *ring) +{ + sxevf_rx_ring_buffer_clean(ring); + + ring->xdp_prog = NULL; +#ifndef HAVE_NO_XDP_BUFF_RXQ + xdp_rxq_info_unreg(&ring->xdp_rxq); +#endif + + if (ring->rx_buffer_info) { + vfree(ring->rx_buffer_info); + ring->rx_buffer_info = NULL; + } + + if (ring->desc.base_addr) { + dma_free_coherent(ring->dev, ring->size, + ring->desc.base_addr, ring->desc.dma); + ring->desc.base_addr = NULL; + } + + return; +} + +void sxevf_rx_resources_free(struct sxevf_adapter *adapter) +{ + u16 i; + + LOG_DEBUG_BDF("free:adapter->rx_ring_ctxt.num = %u\n", adapter->rx_ring_ctxt.num); + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + if (adapter->rx_ring_ctxt.ring[i]->desc.base_addr) { + sxevf_rx_ring_free(adapter->rx_ring_ctxt.ring[i]); + } + } + + return; +} + +static int sxevf_rx_resources_alloc(struct sxevf_adapter *adapter) +{ + int ret; + u16 i; + + LOG_DEBUG_BDF("adapter->rx_ring_ctxt.num = %u\n", adapter->rx_ring_ctxt.num); + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + ret = sxevf_rx_ring_alloc(adapter->rx_ring_ctxt.ring[i]); + if (!ret) { + continue; + } + + LOG_DEV_ERR("allocation for Rx ring[%u] failed\n", i); + goto l_err; + } + + return 0; + +l_err: + while (i--) { + sxevf_rx_ring_free(adapter->rx_ring_ctxt.ring[i]); + } + + return ret; +} + +s32 sxevf_rx_ring_depth_reset(struct sxevf_adapter *adapter, u32 rx_cnt) +{ + s32 ret; + u32 i, rx_ring_cnt; + struct sxevf_ring *temp_ring; + struct sxevf_ring **rx_ring = adapter->rx_ring_ctxt.ring; + + rx_ring_cnt = adapter->rx_ring_ctxt.num; + temp_ring = vmalloc(array_size(rx_ring_cnt, sizeof(struct sxevf_ring))); + if (!temp_ring) { + LOG_ERROR_BDF("vmalloc temp_ring failed, size=%lu\n", + array_size(rx_ring_cnt, sizeof(struct sxevf_ring))); + ret = -ENOMEM; + goto l_end; + } + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + memcpy(&temp_ring[i], rx_ring[i], sizeof(struct sxevf_ring)); + +#ifndef HAVE_NO_XDP_BUFF_RXQ + memset(&temp_ring[i].xdp_rxq, 0, sizeof(temp_ring[i].xdp_rxq)); +#endif + temp_ring[i].depth = rx_cnt; + ret = sxevf_rx_ring_alloc(&temp_ring[i]); + if (ret) { + LOG_ERROR_BDF("xdp ring alloc failed, rx ring idx=%d\n", i); + goto l_rx_free; + } + } + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + sxevf_rx_ring_free(rx_ring[i]); + memcpy(rx_ring[i], &temp_ring[i], sizeof(struct sxevf_ring)); + } + + adapter->rx_ring_ctxt.depth = rx_cnt; + goto l_temp_free; + +l_rx_free: + while (i--) { + sxevf_rx_ring_free(&temp_ring[i]); + } + +l_temp_free: + vfree(temp_ring); + +l_end: + return ret; +} + +static void sxevf_rx_buffer_size_set(struct sxevf_adapter *adapter, + struct sxevf_ring *rx_ring) +{ +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC +#if (PAGE_SIZE < SXEVF_PAGE_SIZE_8KB) + u32 max_frame = sxevf_sw_mtu_get(adapter); +#endif +#endif + + clear_bit(SXEVF_RX_3K_BUFFER, &rx_ring->state); + clear_bit(SXEVF_RX_BUILD_SKB_ENABLED, &rx_ring->state); + +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC + if (adapter->cap & SXEVF_RX_LEGACY_ENABLE) { + LOG_INFO_BDF("in rx legacy mode, buffer size=2K\n"); + goto l_end; + } + + set_bit(SXEVF_RX_BUILD_SKB_ENABLED, &rx_ring->state); + LOG_INFO_BDF("in rx build skb mode, buffer size=2K\n"); + +#if (PAGE_SIZE < SXEVF_PAGE_SIZE_8KB) + if (max_frame > SXEVF_MAX_FRAME_BUILD_SKB) { + set_bit(SXEVF_RX_3K_BUFFER, &rx_ring->state); + LOG_INFO_BDF("in rx max frame build skb mode, buffer 
size=3K\n"); + } + +#endif +#else + adapter->cap |= SXEVF_RX_LEGACY_ENABLE; +#endif + +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC +l_end: +#endif + return; +} + +STATIC bool sxevf_mapped_page_alloc(struct sxevf_ring *rx_ring, + struct sxevf_rx_buffer *rx_buf) +{ + bool ret; + struct page *page = rx_buf->page; + dma_addr_t dma; +#ifdef HAVE_DMA_ATTRS_STRUCT + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + + if (likely(page)) { + ret = true; + goto l_ret; + } + + page = dev_alloc_pages(sxevf_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + LOG_DEBUG("ring[%u] page alloc failed\n", rx_ring->idx); + rx_ring->rx_stats.alloc_rx_page_failed++; + ret = false; + goto l_ret; + } + + dma = dma_map_page_attrs(rx_ring->dev, page, 0, + sxevf_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#ifdef HAVE_DMA_ATTRS_STRUCT + &attrs); +#else + SXEVF_RX_DMA_ATTR); +#endif + if (dma_mapping_error(rx_ring->dev, dma)) { + LOG_DEBUG("ring[%u] dma mapping failed\n", rx_ring->idx); + __free_pages(page, sxevf_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + ret = false; + goto l_ret; + } + + rx_buf->dma = dma; + rx_buf->page = page; + rx_buf->page_offset = sxevf_rx_offset(rx_ring); + rx_buf->pagecnt_bias = 1; + rx_ring->rx_stats.alloc_rx_page++; + + return true; + +l_ret: + return ret; +} + +void sxevf_rx_ring_buffers_alloc(struct sxevf_ring *rx_ring, u16 cleaned_count) +{ + union sxevf_rx_data_desc *rx_desc; + struct sxevf_rx_buffer *rx_buffer; + u16 ntu = rx_ring->next_to_use; + u16 bufsz; + + LOG_DEBUG("ring[%u] entry, cleand_count=%u, next_to_use=%u\n", + rx_ring->idx, cleaned_count, ntu); + + if (!cleaned_count || !rx_ring->netdev) { + return; + } + + rx_desc = SXEVF_RX_DESC(rx_ring, ntu); + rx_buffer = &rx_ring->rx_buffer_info[ntu]; + ntu -= rx_ring->depth; + + bufsz = sxevf_rx_bufsz(rx_ring); + LOG_DEBUG("bufsz=%u\n", bufsz); + + do { + if (!sxevf_mapped_page_alloc(rx_ring, rx_buffer)) { + LOG_DEBUG("page alloc failed, clean_count left:%u\n", + cleaned_count); + break; + } + + dma_sync_single_range_for_device(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, bufsz, + DMA_FROM_DEVICE); + + rx_desc->read.pkt_addr = + cpu_to_le64(rx_buffer->dma + rx_buffer->page_offset); + + rx_desc++; + rx_buffer++; + ntu++; + if (unlikely(!ntu)) { + rx_desc = SXEVF_RX_DESC(rx_ring, 0); + rx_buffer = rx_ring->rx_buffer_info; + ntu -= rx_ring->depth; + } + + rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); + + ntu += rx_ring->depth; + + if (rx_ring->next_to_use != ntu) { + rx_ring->next_to_use = ntu; + + rx_ring->next_to_alloc = ntu; + + wmb(); + writel(ntu, rx_ring->desc.tail); + } + + return; +} + +static inline void sxevf_rx_ring_source_init(struct sxevf_ring *ring) +{ + union sxevf_rx_data_desc *rx_desc; + + memset(ring->rx_buffer_info, 0, + sizeof(struct sxevf_rx_buffer) * ring->depth); + + rx_desc = SXEVF_RX_DESC(ring, 0); + rx_desc->wb.upper.length = 0; + + ring->next_to_clean = 0; + ring->next_to_use = 0; + ring->next_to_alloc = 0; + + return; +} + +static inline void sxevf_rx_ring_reg_configure(struct sxevf_adapter *adapter, + struct sxevf_ring *ring) +{ + struct sxevf_hw *hw = &adapter->hw; + u64 desc_dma_addr = ring->desc.dma; + u8 reg_idx = ring->reg_idx; + u32 desc_mem_len; + u32 pkg_buf_len; + + hw->dma.ops->rx_ring_switch(hw, reg_idx, false); + + desc_mem_len = ring->depth * sizeof(union sxevf_rx_data_desc); + hw->dma.ops->rx_ring_desc_configure(hw, desc_mem_len, desc_dma_addr, reg_idx); 
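+	/*
+	 * Cache the VF RDT tail register address; sxevf_rx_ring_buffers_alloc()
+	 * writes next_to_use here to hand fresh descriptors to the hardware.
+	 */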
+ + ring->desc.tail = adapter->hw.reg_base_addr + SXE_VFRDT(reg_idx); + + if (test_bit(SXEVF_RX_3K_BUFFER, &ring->state)) { + pkg_buf_len = SXEVF_RXBUFFER_3K; + } else { + pkg_buf_len = SXEVF_RXBUFFER_2K; + } + + hw->dma.ops->rx_rcv_ctl_configure(hw, reg_idx, + SXEVF_RX_HDR_SIZE, pkg_buf_len, true); + + hw->dma.ops->rx_ring_switch(hw, reg_idx, true); + + return; +} + +static void sxevf_rx_ring_configure(struct sxevf_adapter *adapter) +{ + u32 i; + struct sxevf_ring **ring = adapter->rx_ring_ctxt.ring; + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + sxevf_rx_ring_source_init(ring[i]); + + sxevf_rx_buffer_size_set(adapter, ring[i]); + + sxevf_rx_ring_reg_configure(adapter, ring[i]); + + sxevf_rx_ring_buffers_alloc(ring[i], + sxevf_desc_unused(ring[i])); + } + + return; +} + +static void sxevf_vlan_restore(struct sxevf_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) { + sxevf_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); + LOG_DEBUG_BDF("restore vlan[%u] succeed\n", vid); + } + + return; +} + +static void sxevf_max_used_rx_queue_configure(struct sxevf_adapter *adapter) +{ + struct sxevf_hw *hw = &adapter->hw; + u16 max_rx_ring = adapter->rx_ring_ctxt.num; + + hw->dbu.ops->rx_max_used_ring_set(hw, max_rx_ring); + return; +} + +s32 sxevf_rx_max_frame_configure(struct sxevf_adapter *adapter, u32 mtu) +{ + s32 ret; + struct sxevf_hw *hw = &adapter->hw; + + spin_lock_bh(&adapter->mbx_lock); + ret = sxevf_rx_max_frame_set(hw, mtu); + spin_unlock_bh(&adapter->mbx_lock); + if (ret) { + LOG_DEV_ERR("failed to set MTU at %u\n", mtu); + } + + return ret; +} + +void sxevf_hw_rx_configure(struct sxevf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + sxevf_set_rx_mode(netdev); + + sxevf_vlan_restore(adapter); + + sxevf_max_used_rx_queue_configure(adapter); + sxevf_rx_max_frame_configure(adapter, netdev->mtu); + + sxevf_rx_ring_configure(adapter); + + return; +} + +s32 sxevf_rx_configure(struct sxevf_adapter *adapter) +{ + s32 ret; + u16 queues; + struct net_device *netdev = adapter->netdev; + + ret = sxevf_rx_resources_alloc(adapter); + if (ret) { + LOG_ERROR_BDF("allocation for Rx resources failed:Out of memory\n"); + goto l_ret; + } + + sxevf_hw_rx_configure(adapter); + + queues = adapter->rx_ring_ctxt.num; + ret = netif_set_real_num_rx_queues(netdev, queues); + if (ret) { + goto l_err_clean; + } + + return 0; + +l_err_clean: + sxevf_rx_release(adapter); + +l_ret: + return ret; +} + +static struct sxevf_rx_buffer *sxevf_rx_buffer_get(struct sxevf_ring *rx_ring, + union sxevf_rx_data_desc *rx_desc, + struct sk_buff **skb, + const u32 size) +{ + struct sxevf_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + size, + DMA_FROM_DEVICE); + + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void sxevf_rx_buffer_flip(struct sxevf_ring *rx_ring, + struct sxevf_rx_buffer *rx_buffer, + unsigned int size) +{ +#if (PAGE_SIZE < SXEVF_PAGE_SIZE_8KB) + u32 truesize = sxevf_rx_pg_size(rx_ring) / SXEVF_BUFFER_PER_4KPAGE; + + rx_buffer->page_offset ^= truesize; +#else + u32 truesize = vf_ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(SXEVF_SKB_PAD + size) : + SKB_DATA_ALIGN(size); + + rx_buffer->page_offset += truesize; +#endif + return; +} + +static void sxevf_rx_frag_add(struct sxevf_ring *rx_ring, + struct sxevf_rx_buffer *rx_buffer, + struct sk_buff *skb, + u32 size) +{ +#if (PAGE_SIZE < SXEVF_PAGE_SIZE_8KB) + u32 truesize = sxevf_rx_pg_size(rx_ring) / SXEVF_BUFFER_PER_4KPAGE; +#else + u32 truesize = vf_ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(SXEVF_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + return; +} + +#ifndef HAVE_NET_PREFETCH_API +static inline void net_prefetch(void *data) +{ + prefetch(data); +#if L1_CACHE_BYTES < 128 + prefetch(data + L1_CACHE_BYTES); +#endif +} +#endif + +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC +STATIC struct sk_buff *sxevf_skb_build(struct sxevf_ring *rx_ring, + struct sxevf_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union sxevf_rx_data_desc *rx_desc) +{ +#ifdef HAVE_XDP_BUFF_DATA_META + u32 metasize = xdp->data - xdp->data_meta; +#endif + +#if (PAGE_SIZE < SXEVF_PAGE_SIZE_8KB) + u32 truesize = sxevf_rx_pg_size(rx_ring) / SXEVF_BUFFER_PER_4KPAGE; +#else + u32 truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + struct sk_buff *skb; + +#ifdef HAVE_XDP_BUFF_DATA_META + net_prefetch(xdp->data_meta); +#else + net_prefetch(xdp->data); +#endif + + skb = build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) { + goto l_ret; + } + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); +#ifdef HAVE_XDP_BUFF_DATA_META + if (metasize) { + skb_metadata_set(skb, metasize); + } +#endif + +#if (PAGE_SIZE < SXEVF_PAGE_SIZE_8KB) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + +l_ret: + return skb; +} +#endif + +#ifdef ETH_GET_HEADLEN_API_NEED_2_PARAM +static inline u32 +eth_get_headlen_compat(const struct net_device __always_unused *dev, void *data, + unsigned int len) +{ + return eth_get_headlen(data, len); +} + +#define eth_get_headlen(dev, data, len) eth_get_headlen_compat(dev, data, len) +#endif + +static struct sk_buff *sxevf_skb_construct(struct sxevf_ring *rx_ring, + struct sxevf_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union sxevf_rx_data_desc *rx_desc) +{ + u32 size = xdp->data_end - xdp->data; +#if (PAGE_SIZE < SXEVF_PAGE_SIZE_8KB) + u32 truesize = sxevf_rx_pg_size(rx_ring) / SXEVF_BUFFER_PER_4KPAGE; +#else + u32 truesize = SKB_DATA_ALIGN(xdp->data_end - + xdp->data_hard_start); +#endif + struct sk_buff *skb; + u32 headlen; + + net_prefetch(xdp->data); + + skb = napi_alloc_skb(&rx_ring->irq_data->napi, SXEVF_RX_HDR_SIZE); + if (unlikely(!skb)) { + goto l_ret; + } + + headlen = size; + if (headlen > SXEVF_RX_HDR_SIZE) { + headlen = eth_get_headlen(skb->dev, xdp->data, + SXEVF_RX_HDR_SIZE); + } + + memcpy(__skb_put(skb, headlen), xdp->data, + ALIGN(headlen, sizeof(long))); + + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, + (xdp->data + headlen) - + page_address(rx_buffer->page), + size, truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + rx_buffer->pagecnt_bias++; + } + +l_ret: + return skb; +} + +#ifndef HAVE_DEV_PAGE_IS_REUSABLE_API +static inline 
bool dev_page_is_reusable_compat(struct page *page) +{ + return likely(page_to_nid(page) == numa_mem_id() && \ + !page_is_pfmemalloc(page)); +} +#define dev_page_is_reusable dev_page_is_reusable_compat +#endif + +STATIC bool sxevf_is_rx_page_can_reuse(struct sxevf_rx_buffer *rx_buffer) +{ + u32 pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + if (!dev_page_is_reusable(page)) { + LOG_DEBUG("page[%p] can not reuse since it is reserved page, " + "page_numa_id=%d, cpu_numa_id=%d, pfmemalloc:%s\n", + page, page_to_nid(page), numa_mem_id(), + page_is_pfmemalloc(page) ? "yes" : "no"); + goto l_false; + } + +#if (PAGE_SIZE < SXEVF_PAGE_SIZE_8KB) + LOG_DEBUG("page[%p] (page_ref_count(page) - pagecnt_bias)=%d\n", + page, (page_ref_count(page) - pagecnt_bias)); + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) { + LOG_DEBUG("page[%p] can not reuse page_ref=%d --- bias=%d\n", + page, page_ref_count(page), pagecnt_bias); + goto l_false; + } +#else +#define SXEVF_LAST_OFFSET \ + (SKB_WITH_OVERHEAD(PAGE_SIZE) - SXEVF_RXBUFFER_2K) + + if (rx_buffer->page_offset > SXEVF_LAST_OFFSET) { + LOG_DEBUG("page[%p] can not reuse rx_buffer->page_offset=%u > %u\n", + page, rx_buffer->page_offset, SXEVF_LAST_OFFSET); + goto l_false; + } +#endif + + if (unlikely(!pagecnt_bias)) { + page_ref_add(page, USHRT_MAX); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; + +l_false: + return false; +} + +static void sxevf_rx_page_reuse(struct sxevf_ring *rx_ring, + struct sxevf_rx_buffer *old_buff) +{ + struct sxevf_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->depth) ? nta : 0; + + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; + + return; +} + +static inline void sxevf_rx_page_release(struct sxevf_ring *rx_ring, + struct sxevf_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ +#ifdef HAVE_DMA_ATTRS_STRUCT + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); + dma_set_attr(DMA_ATTR_WEAK_ORDERING, &attrs); +#endif + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + sxevf_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, +#ifdef HAVE_DMA_ATTRS_STRUCT + &attrs); +#else + SXEVF_RX_DMA_ATTR); +#endif + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + + return; +} + +static void sxevf_put_rx_buffer(struct sxevf_ring *rx_ring, + struct sxevf_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ + if (sxevf_is_rx_page_can_reuse(rx_buffer)) { + sxevf_rx_page_reuse(rx_ring, rx_buffer); + } else { + LOG_DEBUG("ring[%u], rx_buffer[%p]'s page[%p] can release\n", + rx_ring->idx, rx_buffer, rx_buffer->page); + sxevf_rx_page_release(rx_ring, rx_buffer, skb); + } + + rx_buffer->page = NULL; + + return; +} + +static bool sxevf_is_non_eop(struct sxevf_ring *rx_ring, + union sxevf_rx_data_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->depth) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + LOG_DEBUG("next to clean:ntc[%u]\n", ntc); + + prefetch(SXEVF_RX_DESC(rx_ring, ntc)); + + if (likely(sxevf_status_err_check(rx_desc, SXEVF_RXD_STAT_EOP))) { + LOG_DEBUG("rx_desc[%p] the last packet arrived\n",rx_desc); + goto l_false; + } + + LOG_INFO("rx_desc[%p] in chain mode\n",rx_desc); + return true; + +l_false: + return false; +} + +static bool sxevf_headers_cleanup(struct sxevf_ring *rx_ring, + union sxevf_rx_data_desc *rx_desc, + struct sk_buff *skb) +{ + bool ret = false; + + if (IS_ERR(skb)) { + ret = true; + goto l_ret; + } + + if (eth_skb_pad(skb)) { + ret = true; + goto l_ret; + } + +l_ret: + return ret; +} + +static inline void sxevf_rx_hash_set(struct sxevf_ring *ring, + union sxevf_rx_data_desc *rx_desc, + struct sk_buff *skb) +{ + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) { + goto l_end; + } + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + SXEVF_RXDADV_RSSTYPE_MASK; + if (!rss_type) { + goto l_end; + } + LOG_DEBUG("rss_type:0x%x, rss hash value:%u\n", rss_type, + le32_to_cpu(rx_desc->wb.lower.hi_dword.rss)); + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (SXEVF_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +l_end: + return; +} + +static void sxevf_skb_fields_process(struct sxevf_ring *rx_ring, + union sxevf_rx_data_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + + sxevf_rx_hash_set(rx_ring, rx_desc, skb); + + sxevf_rx_csum_verify(rx_ring, rx_desc, skb); + + if (sxevf_status_err_check(rx_desc, SXEVF_RXD_STAT_VP)) { + u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); + struct sxevf_adapter *adapter = netdev_priv(rx_ring->netdev); + unsigned long *active_vlans = adapter->active_vlans; + LOG_DEBUG("rx vlan id=%u\n", vid); + + if (test_bit(vid & VLAN_VID_MASK, active_vlans)) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + } + +#ifdef SXE_IPSEC_CONFIGURE + sxevf_rx_ipsec_proc(rx_ring, rx_desc, skb); +#endif + + skb->protocol = eth_type_trans(skb, dev); + + return; +} + +static void sxevf_rx_skb_deliver(struct sxevf_irq_data *irq_data, + struct sk_buff *skb) +{ + napi_gro_receive(&irq_data->napi, skb); + return; +} + +static inline void sxevf_rx_stats_update(struct sxevf_ring *rx_ring, + u32 total_rx_bytes, + u32 total_rx_packets) +{ + LOG_DEBUG("in the irq, process total packets[%u], bytes[%u]\n", + total_rx_packets, total_rx_bytes); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + + rx_ring->irq_data->rx.irq_rate.total_bytes += total_rx_bytes; + rx_ring->irq_data->rx.irq_rate.total_packets += total_rx_packets; + + return; +} + +static inline u32 sxevf_rx_frame_truesize(struct sxevf_ring *rx_ring, + unsigned int size) +{ + u32 truesize; + +#if (PAGE_SIZE < SXEVF_PAGE_SIZE_8KB) + truesize = sxevf_rx_pg_size(rx_ring) / SXEVF_BUFFER_PER_4KPAGE; +#else + truesize = vf_ring_uses_build_skb(rx_ring) ? 
+ SKB_DATA_ALIGN(SXEVF_SKB_PAD + size) +#ifdef HAVE_XDP_BUFF_FRAME_SIZE + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#endif + : SKB_DATA_ALIGN(size); +#endif + return truesize; +} + +u32 sxevf_rx_ring_irq_clean(struct sxevf_irq_data *irq_data, + struct sxevf_ring *rx_ring, + const u32 budget) +{ + + u32 total_rx_bytes = 0; + u32 total_rx_packets = 0; + u32 rx_offset = sxevf_rx_offset(rx_ring); + struct sxevf_adapter *adapter = irq_data->adapter; + u16 cleaned_count = sxevf_desc_unused(rx_ring); + struct sk_buff *skb = rx_ring->skb; + bool xdp_xmit = false; + struct xdp_buff xdp; + +#ifdef HAVE_XDP_BUFF_FRAME_SIZE + u32 frame_sz = 0; +#if (PAGE_SIZE < 8192) + frame_sz = sxevf_rx_frame_truesize(rx_ring, 0); +#endif + +#ifdef HAVE_XDP_BUFF_INIT_API + xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); +#else + xdp.frame_sz = frame_sz; +#endif +#endif + +#ifndef HAVE_NO_XDP_BUFF_RXQ + xdp.rxq = &rx_ring->xdp_rxq; +#endif + + LOG_DEBUG("entry rx IRQ:irq=%u, ring_idx=%u, ring_reg_idx=%u, ring_tc_idx=%u," + "next_to_clean=%u, next_to_use=%u, budget=%u\n", + irq_data->irq_idx, rx_ring->idx, rx_ring->reg_idx, rx_ring->tc_idx, + rx_ring->next_to_clean, rx_ring->next_to_use, budget); + + while (likely(total_rx_packets < budget)) { + union sxevf_rx_data_desc *rx_desc; + struct sxevf_rx_buffer *rx_buffer; + u32 size; + + if (cleaned_count >= SXEVF_RX_BUFFER_WRITE) { + sxevf_rx_ring_buffers_alloc(rx_ring, cleaned_count); + cleaned_count = 0; + } + + rx_desc = SXEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); + size = le16_to_cpu(rx_desc->wb.upper.length); + if (!size) { + break; + } + + LOG_DEBUG("process rx_desc[%u], write back info:" + "status_error=0x%x, length=%u, vlan=%u\n", + rx_ring->next_to_clean, + le16_to_cpu(rx_desc->wb.upper.status_error), + le16_to_cpu(rx_desc->wb.upper.length), + le16_to_cpu(rx_desc->wb.upper.vlan)); + + rmb(); + + rx_buffer = sxevf_rx_buffer_get(rx_ring, rx_desc, &skb, size); + + if (!skb) { +#ifdef HAVE_XDP_PREPARE_BUFF_API + u8 *hard_start = page_address(rx_buffer->page) + + rx_buffer->page_offset - rx_offset; + xdp_prepare_buff(&xdp, hard_start, rx_offset, size, true); +#else + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; +#ifdef HAVE_XDP_BUFF_DATA_META + xdp.data_meta = xdp.data; +#endif + xdp.data_hard_start = xdp.data - rx_offset; + xdp.data_end = xdp.data + size; +#endif + +#ifdef HAVE_XDP_BUFF_FRAME_SIZE +#if (PAGE_SIZE > SXEVF_PAGE_SIZE_4KB) + xdp.frame_sz = sxevf_rx_frame_truesize(rx_ring, size); +#endif +#endif + skb = sxevf_xdp_run(adapter, rx_ring, &xdp); + } + + if (IS_ERR(skb)) { + unsigned int xdp_res = - PTR_ERR(skb); + + if (xdp_res == SXEVF_XDP_TX) { + xdp_xmit = true; + sxevf_rx_buffer_flip(rx_ring, rx_buffer, size); + } else { + rx_buffer->pagecnt_bias++; + } + total_rx_packets++; + total_rx_bytes += size; + } else if (skb) { + sxevf_rx_frag_add(rx_ring, rx_buffer, skb, size); +#ifndef HAVE_NO_SWIOTLB_SKIP_CPU_SYNC + } else if (vf_ring_uses_build_skb(rx_ring)) { + skb = sxevf_skb_build(rx_ring, rx_buffer, + &xdp, rx_desc); +#endif + } else { + skb = sxevf_skb_construct(rx_ring, rx_buffer, + &xdp, rx_desc); + } + + if (!skb) { + LOG_INFO("ring[%u] rx_buffer[%p] skb is NULL," + "failed to process\n",rx_ring->idx, rx_buffer); + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + sxevf_put_rx_buffer(rx_ring, rx_buffer, skb); + cleaned_count++; + + if (sxevf_is_non_eop(rx_ring, rx_desc, skb)) { + continue; + } + + if (sxevf_headers_cleanup(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; + } 
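+		/*
+		 * A complete frame has been assembled: account its length, drop
+		 * locally-sourced broadcast/multicast echoes, then fill the skb
+		 * fields (hash, checksum, VLAN) and hand the packet to GRO.
+		 */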
+ + total_rx_bytes += skb->len; + + if ((skb->pkt_type == PACKET_BROADCAST || + skb->pkt_type == PACKET_MULTICAST) && + ether_addr_equal(rx_ring->netdev->dev_addr, + eth_hdr(skb)->h_source)) { + LOG_WARN("in vepa mode, can not handle\n"); + dev_kfree_skb_irq(skb); + continue; + } + + sxevf_skb_fields_process(rx_ring, rx_desc, skb); + + SKB_DUMP(skb); + + total_rx_packets++; + LOG_DEBUG("in loop[%u], process total bytes:%u\n", + total_rx_packets, skb->len); + + sxevf_rx_skb_deliver(irq_data, skb); + + skb = NULL; + } + + rx_ring->skb = skb; + + if (xdp_xmit) { + struct sxevf_ring *ring = adapter->xdp_ring_ctxt.ring[rx_ring->idx]; + + wmb(); + writel(ring->next_to_use, ring->desc.tail); + } + + sxevf_rx_stats_update(rx_ring, total_rx_bytes, total_rx_packets); + + return total_rx_packets; +} + +void sxevf_hw_rx_disable(struct sxevf_adapter *adapter) +{ + u32 i; + u8 reg_idx; + struct sxevf_hw *hw = &adapter->hw; + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + reg_idx = adapter->rx_ring_ctxt.ring[i]->reg_idx; + + hw->dma.ops->rx_disable(hw, reg_idx); + } + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_rx_proc.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_rx_proc.h new file mode 100644 index 000000000000..b27cc744279d --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_rx_proc.h @@ -0,0 +1,132 @@ + +#ifndef __SXEVF_RX_H__ +#define __SXEVF_RX_H__ + +#include "sxevf.h" + +#define ALIGN_4K (4096) +#define SXEVF_RX_BUFFER_WRITE 16 + +#define SXEVF_RXBUFFER_256 256 +#define SXEVF_RXBUFFER_1536 1536 +#define SXEVF_RXBUFFER_2K 2048 +#define SXEVF_RXBUFFER_3K 3072 +#define SXEVF_RXBUFFER_4K 4096 +#define SXEVF_MAX_RXBUFFER 16384 + +#define SXEVF_RX_HDR_SIZE SXEVF_RXBUFFER_256 + +#define SXEVF_RXDADV_RSSTYPE_MASK 0x0000000F + +#define SXEVF_RXDADV_RSSTYPE_NONE 0x00000000 +#define SXEVF_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define SXEVF_RXDADV_RSSTYPE_IPV4 0x00000002 +#define SXEVF_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define SXEVF_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define SXEVF_RXDADV_RSSTYPE_IPV6 0x00000005 +#define SXEVF_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define SXEVF_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define SXEVF_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define SXEVF_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +#define SXEVF_RSS_L4_TYPES_MASK \ + ((1ul << SXEVF_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << SXEVF_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << SXEVF_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << SXEVF_RXDADV_RSSTYPE_IPV6_UDP)) + +#define SXEVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#if (PAGE_SIZE < 8192) +#define SXEVF_MAX_FRAME_BUILD_SKB \ + (SKB_WITH_OVERHEAD(SXEVF_RXBUFFER_2K) - SXEVF_SKB_PAD) +#else +#define SXEVF_MAX_FRAME_BUILD_SKB SXEVF_RXBUFFER_2K +#endif + +#define vf_ring_uses_build_skb(ring) \ + test_bit(SXEVF_RX_BUILD_SKB_ENABLED, &(ring)->state) + +#ifdef HAVE_DMA_ATTRS_STRUCT +#define SXEVF_RX_DMA_ATTR NULL +#else +#define SXEVF_RX_DMA_ATTR \ + (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) +#endif + +static inline u16 sxevf_rx_pg_order(struct sxevf_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (test_bit(SXEVF_RX_3K_BUFFER, &ring->state)) { + return 1; + } + +#endif + return 0; +} + +#define sxevf_rx_pg_size(_ring) (PAGE_SIZE << sxevf_rx_pg_order(_ring)) + +s32 sxevf_rx_configure(struct sxevf_adapter *adapter); +void sxevf_rx_resources_free(struct sxevf_adapter *adapter); + +static inline u16 sxevf_desc_unused(struct sxevf_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->depth) + ntc - ntu - 1; +} + +static inline u32 sxevf_rx_bufsz(struct sxevf_ring *ring) +{ + u32 bufsz; + +#if (PAGE_SIZE < 8192) + if (test_bit(SXEVF_RX_3K_BUFFER, &ring->state)) { + bufsz = SXEVF_RXBUFFER_3K; + goto l_ret; + } + + if (vf_ring_uses_build_skb(ring)) { + bufsz = SXEVF_MAX_FRAME_BUILD_SKB; + goto l_ret; + } + +#endif + bufsz = SXEVF_RXBUFFER_2K; + +#if (PAGE_SIZE < 8192) +l_ret: +#endif + return bufsz; +} + +static inline u32 sxevf_rx_offset(struct sxevf_ring *rx_ring) +{ + return vf_ring_uses_build_skb(rx_ring) ? SXEVF_SKB_PAD : 0; +} + +void sxevf_rx_ring_buffer_clean(struct sxevf_ring *ring); + +u32 sxevf_rx_ring_irq_clean(struct sxevf_irq_data *q_vector, + struct sxevf_ring *rx_ring, + const u32 budget); + +s32 sxevf_rx_ring_depth_reset(struct sxevf_adapter *adapter, u32 rx_cnt); + +void sxevf_hw_rx_disable(struct sxevf_adapter *adapter); + +static inline void sxevf_rx_release(struct sxevf_adapter *adapter) +{ + sxevf_rx_resources_free(adapter); + + return; +} + +s32 sxevf_rx_configure(struct sxevf_adapter *adapter); + +void sxevf_hw_rx_configure(struct sxevf_adapter *adapter); + +s32 sxevf_rx_max_frame_configure(struct sxevf_adapter *adapter, u32 mtu); + +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_tx_proc.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_tx_proc.c new file mode 100644 index 000000000000..db7802c7f1af --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_tx_proc.c @@ -0,0 +1,1182 @@ + +#include +#include +#include +#include +#include +#include + +#include "sxevf.h" +#ifdef HAVE_NO_OVERFLOW_H +#include +#else +#include +#endif +#include "sxe_log.h" +#include "sxevf_tx_proc.h" +#include "sxevf_irq.h" +#include "sxevf_hw.h" +#include "sxevf_pci.h" +#include "sxevf_monitor.h" +#include "sxevf_csum.h" +#include "sxevf_ipsec.h" +#include "sxevf_debug.h" + +#define SXEVF_IPV4 4 +#define SXEVF_SKB_MINI_LEN 17 + +void sxevf_tx_ring_buffer_clean(struct sxevf_ring *ring) +{ + union sxevf_tx_data_desc *eop_desc, *tx_desc; + u16 ntc = ring->next_to_clean; + struct sxevf_tx_buffer *tx_buffer = &ring->tx_buffer_info[ntc]; + + while (ntc != ring->next_to_use) { +#ifdef HAVE_XDP_SUPPORT + if (vf_ring_is_xdp(ring)) { + page_frag_free(tx_buffer->data); + } else { + dev_kfree_skb_any(tx_buffer->skb); + } +#else + dev_kfree_skb_any(tx_buffer->skb); +#endif + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + + eop_desc = tx_buffer->next_to_watch; + tx_desc = SXEVF_TX_DESC(ring, ntc); + + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + ntc++; + if (unlikely(ntc == ring->depth)) { + ntc = 0; + tx_buffer = ring->tx_buffer_info; + tx_desc = SXEVF_TX_DESC(ring, 0); + } + + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + } + + tx_buffer++; + ntc++; + if (unlikely(ntc == ring->depth)) { + ntc = 0; + tx_buffer = ring->tx_buffer_info; + } + } + + ring->next_to_use = 0; + ring->next_to_clean = 0; + + return; +} + +void sxevf_tx_ring_free(struct sxevf_ring *ring) +{ + sxevf_tx_ring_buffer_clean(ring); + + if (ring->tx_buffer_info) { + vfree(ring->tx_buffer_info); + ring->tx_buffer_info = NULL; + } + + if (ring->desc.base_addr) { + dma_free_coherent(ring->dev, ring->size, + ring->desc.base_addr, ring->desc.dma); + ring->desc.base_addr = NULL; + } + + return ; +} + +void sxevf_tx_resources_free(struct sxevf_adapter *adapter) +{ + u32 i; + struct sxevf_ring 
**tx_ring = adapter->tx_ring_ctxt.ring; + struct sxevf_ring **xdp_ring = adapter->xdp_ring_ctxt.ring; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + if (tx_ring[i]->desc.base_addr) { + sxevf_tx_ring_free(tx_ring[i]); + } + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + if (xdp_ring[i]->desc.base_addr) { + sxevf_tx_ring_free(xdp_ring[i]); + } + } + + return; +} + +static inline void sxevf_tx_buffer_init(struct sxevf_ring *ring) +{ + memset(ring->tx_buffer_info, 0, + sizeof(struct sxevf_tx_buffer) * ring->depth); + return; +} + +s32 sxevf_tx_ring_alloc(struct sxevf_ring *ring) +{ + s32 ret; + u32 size = sizeof(struct sxevf_tx_buffer) * ring->depth; + struct sxevf_adapter *adapter = netdev_priv(ring->netdev); + + ring->tx_buffer_info = vmalloc(size); + if (!ring->tx_buffer_info) { + ret = -ENOMEM; + goto l_err; + } + + u64_stats_init(&ring->syncp); + + ring->size = ring->depth * sizeof(union sxevf_tx_data_desc); + ring->size = ALIGN(ring->size, SXEVF_DESC_ALIGN_4K); + + ring->desc.base_addr = dma_alloc_coherent(ring->dev, ring->size, + &ring->desc.dma, GFP_KERNEL); + if (!ring->desc.base_addr) { + ret = -ENOMEM; + goto l_free; + } + + ring->next_to_use = 0; + ring->next_to_clean = 0; + return 0; + +l_free: + vfree(ring->tx_buffer_info); + ring->tx_buffer_info = NULL; + +l_err: + LOG_DEV_ERR("unable to allocate memory for the Tx descriptor ring\n"); + return ret; +} + +static s32 sxevf_tx_resources_alloc(struct sxevf_adapter *adapter) +{ + s32 ret; + u32 i, j; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + ret = sxevf_tx_ring_alloc(adapter->tx_ring_ctxt.ring[i]); + if (ret < 0) { + LOG_DEV_ERR("allocation for Tx Queue %d failed\n", i); + goto l_tx_free; + } + } + + for (j = 0; j < adapter->xdp_ring_ctxt.num; j++) { + ret = sxevf_tx_ring_alloc(adapter->xdp_ring_ctxt.ring[j]); + if (ret < 0) { + LOG_DEV_ERR("allocation for xdp Queue %d failed\n", j); + goto l_xdp_free; + } + } + + return 0; + +l_xdp_free: + while (j--) { + sxevf_tx_ring_free(adapter->xdp_ring_ctxt.ring[j]); + } + +l_tx_free: + while (i--) { + sxevf_tx_ring_free(adapter->tx_ring_ctxt.ring[i]); + } + + return ret; +} + +s32 sxevf_tx_ring_depth_reset(struct sxevf_adapter *adapter, u32 tx_cnt) +{ + s32 ret; + u32 i, j, tx_ring_cnt; + struct sxevf_ring *temp_ring; + struct sxevf_ring **tx_ring = adapter->tx_ring_ctxt.ring; + struct sxevf_ring **xdp_ring = adapter->xdp_ring_ctxt.ring; + + tx_ring_cnt = adapter->tx_ring_ctxt.num + adapter->xdp_ring_ctxt.num; + temp_ring = vmalloc(array_size(tx_ring_cnt, sizeof(struct sxevf_ring))); + if (!temp_ring) { + LOG_ERROR("vmalloc failed, size=%lu\n", + array_size(tx_ring_cnt, sizeof(struct sxevf_ring))); + ret = -ENOMEM; + goto l_end; + } + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + memcpy(&temp_ring[i], tx_ring[i], sizeof(struct sxevf_ring)); + temp_ring[i].depth = tx_cnt; + ret = sxevf_tx_ring_alloc(&temp_ring[i]); + if (ret < 0) { + LOG_ERROR("tx ring alloc failed, tx ring idx=%d\n", i); + goto l_tx_free; + } + } + + for (j = 0; j < adapter->xdp_ring_ctxt.num; j++, i++) { + memcpy(&temp_ring[i], xdp_ring[j], sizeof(struct sxevf_ring)); + temp_ring[i].depth = tx_cnt; + ret = sxevf_tx_ring_alloc(&temp_ring[i]); + if (ret < 0) { + LOG_ERROR("xdp ring alloc failed, xdp ring idx=%d\n", j); + goto l_tx_free; + } + } + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + sxevf_tx_ring_free(tx_ring[i]); + memcpy(tx_ring[i], &temp_ring[i], sizeof(struct sxevf_ring)); + } + + for (j = 0; j < adapter->xdp_ring_ctxt.num; j++, i++) { + 
sxevf_tx_ring_free(xdp_ring[j]); + memcpy(xdp_ring[j], &temp_ring[i], sizeof(struct sxevf_ring)); + } + + adapter->tx_ring_ctxt.depth = tx_cnt; + adapter->xdp_ring_ctxt.depth = tx_cnt; + ret = 0; + goto l_temp_free; + +l_tx_free: + while (i--) { + sxevf_tx_ring_free(&temp_ring[i]); + } + +l_temp_free: + vfree(temp_ring); + +l_end: + return ret; +} + +static void sxevf_tx_ring_reg_configure(struct sxevf_adapter *adapter, + struct sxevf_ring *ring) +{ + u32 reg_idx = ring->reg_idx; + struct sxevf_hw *hw = &adapter->hw; + u32 dma_len = ring->depth * sizeof(union sxevf_tx_data_desc); + u32 hthresh, pthresh; + u32 wthresh = 0; + + ring->desc.tail = adapter->hw.reg_base_addr + SXEVF_TDT(reg_idx); + + hw->dma.ops->tx_writeback_off(hw, reg_idx); + + if (adapter->irq_ctxt.rx_irq_interval) { + wthresh = SXEVF_TX_DESC_WRITEBACK_THRESH_8; + } + + hthresh = SXEVF_TX_DESC_HOST_THRESH_1; + pthresh = SXEVF_TX_DESC_PREFETCH_THRESH_32; + + hw->dma.ops->tx_desc_thresh_set(hw, reg_idx, wthresh, hthresh, pthresh); + + hw->dma.ops->tx_ring_desc_configure(hw, dma_len, + (u64)ring->desc.dma, reg_idx); + + hw->dma.ops->tx_ring_switch(hw, reg_idx, true); + return; +} + +void sxevf_hw_tx_configure(struct sxevf_adapter *adapter) +{ + u32 i; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + clear_bit(SXEVF_HANG_CHECK_ARMED, + &adapter->tx_ring_ctxt.ring[i]->state); + clear_bit(SXEVF_TX_XDP_RING_PRIMED, + &adapter->tx_ring_ctxt.ring[i]->state); + sxevf_tx_buffer_init(adapter->tx_ring_ctxt.ring[i]); + sxevf_tx_ring_reg_configure(adapter,adapter->tx_ring_ctxt.ring[i]); + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++){ + clear_bit(SXEVF_HANG_CHECK_ARMED, + &adapter->xdp_ring_ctxt.ring[i]->state); + clear_bit(SXEVF_TX_XDP_RING_PRIMED, + &adapter->xdp_ring_ctxt.ring[i]->state); + sxevf_tx_buffer_init(adapter->xdp_ring_ctxt.ring[i]); + sxevf_tx_ring_reg_configure(adapter,adapter->xdp_ring_ctxt.ring[i]); + } + + return; +} + +s32 sxevf_tx_configure(struct sxevf_adapter *adapter) +{ + s32 ret; + + ret = sxevf_tx_resources_alloc(adapter); + if (ret) { + LOG_ERROR("tx ring init failed, ret = %d\n", ret); + goto l_err; + } + + sxevf_hw_tx_configure(adapter); + + ret = netif_set_real_num_tx_queues(adapter->netdev, + adapter->tx_ring_ctxt.num); + if (ret) { + LOG_ERROR("netif_set_real_num_tx_queues failed, ret = %d\n", ret); + sxevf_tx_resources_free(adapter); + } + +l_err: + return ret; +} + +static void sxevf_tx_ctxt_desc_set(struct sxevf_ring *tx_ring, + struct sxevf_tx_context_desc *ctxt_desc) +{ + struct sxevf_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = SXEVF_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->depth) ? 
i : 0; + + ctxt_desc->type_tucmd_mlhl |= SXEVF_TXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(ctxt_desc->vlan_macip_lens); + context_desc->sa_idx = cpu_to_le32(ctxt_desc->sa_idx); + context_desc->type_tucmd_mlhl = cpu_to_le32(ctxt_desc->type_tucmd_mlhl); + context_desc->mss_l4len_idx = cpu_to_le32(ctxt_desc->mss_l4len_idx); + + LOG_DEBUG("contxt desc, ring=%u, ntu=%u, ntc=%u, vlan_macip_lens=%#x, " + "sa_idx=%#x, type_tucmd_mlhl=%#x, mss_l4len_idx=%x\n", + tx_ring->idx, tx_ring->next_to_use, tx_ring->next_to_clean, + context_desc->vlan_macip_lens, + context_desc->sa_idx, + context_desc->type_tucmd_mlhl, + context_desc->mss_l4len_idx); + + return; +} + +static s32 sxevf_tso(struct sxevf_ring *tx_ring, + struct sxevf_tx_buffer *first_buff, + struct sxevf_tx_context_desc *ctxt_desc, + u8 *hdr_len) +{ + s32 ret; + u16 tucmd; + union sxevf_ip_hdr ip; + union sxevf_l4_hdr l4; + u8 *csum_start, *trans_start; + u32 mss_l4len, paylen, l4_offset, len; + struct sk_buff *skb = first_buff->skb; + + if (skb->ip_summed != CHECKSUM_PARTIAL || !skb_is_gso(skb)) { + ret = 0; + goto l_end; + } + + LOG_DEBUG("tso start, ring[%d]\n", tx_ring->idx); + + ret = skb_cow_head(skb, 0); + if (ret < 0) { + LOG_ERROR("skb cow head failed, ret=%d\n", ret); + goto l_end; + } + + if (eth_p_mpls(first_buff->protocol)) { + ip.hdr = skb_inner_network_header(skb); + } else { + ip.hdr = skb_network_header(skb); + } + + tucmd = SXEVF_TX_CTXTD_TUCMD_L4T_TCP; + + if (ip.v4->version == SXEVF_IPV4) { + csum_start = skb_checksum_start(skb); + trans_start = ip.hdr + (ip.v4->ihl * 4); + len = csum_start - trans_start; + + ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? + csum_fold(csum_partial(trans_start, + len, 0)) : 0; + + LOG_INFO("tso ipv4 ip.v4->check=%u, gso_type=%x\n", + ip.v4->check, skb_shinfo(skb)->gso_type); + tucmd |= SXEVF_TX_CTXTD_TUCMD_IPV4; + + ip.v4->tot_len = 0; + first_buff->tx_features |= SXEVF_TX_FEATURE_TSO | + SXEVF_TX_FEATURE_CSUM | + SXEVF_TX_FEATURE_IPV4; + } else { + ip.v6->payload_len = 0; + first_buff->tx_features |= SXEVF_TX_FEATURE_TSO | + SXEVF_TX_FEATURE_CSUM; + } + + l4.hdr = skb_checksum_start(skb); + l4_offset = l4.hdr - skb->data; + + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); + + first_buff->gso_segs = skb_shinfo(skb)->gso_segs; + first_buff->bytecount += (first_buff->gso_segs - 1) * (*hdr_len); + + sxevf_ctxt_desc_iplen_set(ctxt_desc, (l4.hdr - ip.hdr)); + sxevf_ctxt_desc_maclen_set(ctxt_desc, (ip.hdr - skb->data)); + sxevf_ctxt_desc_tucmd_set(ctxt_desc, tucmd); + mss_l4len = (*hdr_len - l4_offset) << SXEVF_TX_CTXTD_L4LEN_SHIFT; + mss_l4len |= skb_shinfo(skb)->gso_size << SXEVF_TX_CTXTD_MSS_SHIFT; + sxevf_ctxt_desc_mss_l4len_set(ctxt_desc, mss_l4len); + + ret = 1; +l_end: + return ret; +} + +static inline u16 sxevf_desc_unused_count(struct sxevf_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->depth) + ntc - ntu - 1; +} + +static int sxevf_maybe_stop_tx(struct sxevf_ring *ring, u16 size) +{ + int ret = 0; + + netif_stop_subqueue(ring->netdev, ring->idx); + + smp_mb(); + + if (likely(sxevf_desc_unused_count(ring) < size)) { + ret = -EBUSY; + goto l_end; + } + + netif_start_subqueue(ring->netdev, ring->idx); + + ++ring->tx_stats.restart_queue; + +l_end: + return ret; +} + +STATIC netdev_tx_t sxevf_ring_maybe_stop_tx(struct sk_buff *skb, + struct sxevf_ring *tx_ring) +{ + u16 need_num; + netdev_tx_t ret = NETDEV_TX_OK; + u16 desc_cnt = SXEVF_TX_DESC_USE_COUNT(skb_headlen(skb)); +#if PAGE_SIZE > SXEVF_DATA_PER_DESC_SIZE_MAX + u16 i; + + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + desc_cnt += SXEVF_TX_DESC_USE_COUNT( + skb_frag_size(&skb_shinfo(skb)->frags[i])); + } +#else + desc_cnt += skb_shinfo(skb)->nr_frags; +#endif + need_num = desc_cnt + SXEVF_TX_NON_DATA_DESC_NUM; + + if (unlikely(sxevf_desc_unused_count(tx_ring) < need_num)) { + if (sxevf_maybe_stop_tx(tx_ring, need_num) < 0) { + ret = NETDEV_TX_BUSY; + } + + tx_ring->tx_stats.tx_busy++; + } + + return ret; +} + +static inline struct sxevf_tx_buffer *sxevf_tx_first_buffer_get( + struct sk_buff *skb, + struct sxevf_ring *ring) +{ + struct sxevf_tx_buffer *first_buff; + + first_buff = &ring->tx_buffer_info[ring->next_to_use]; + first_buff->skb = skb; + first_buff->bytecount = skb->len; + first_buff->gso_segs = 1; + + return first_buff; +} + +static u32 sxevf_tx_cmd_type(struct sk_buff *skb, u32 features) +{ + u32 cmd_type = SXEVF_TX_DESC_TYPE_DATA | + SXEVF_TX_DESC_IFCS; + + if (features & SXEVF_TX_FEATURE_VLAN) { + cmd_type |= cpu_to_le32(SXEVF_TXD_DCMD_VLE); + } + + if (features & SXEVF_TX_FEATURE_TSO) { + cmd_type |= cpu_to_le32(SXEVF_TXD_DCMD_TSE); + } + + return cmd_type; +} + +static void sxevf_tx_desc_offload_setup(u32 flags, + unsigned int paylen, + union sxevf_tx_data_desc *tx_desc) +{ + u32 olinfo_status = paylen << SXEVF_TX_DESC_PAYLEN_SHIFT; + + if (flags & SXEVF_TX_FEATURE_CSUM) { + olinfo_status |= cpu_to_le32(SXEVF_TXD_POPTS_TXSM); + } + + if (flags & SXEVF_TX_FEATURE_IPV4) { + olinfo_status |= cpu_to_le32(SXEVF_TXD_POPTS_IXSM); + } + + olinfo_status |= cpu_to_le32(SXEVF_TXD_CC); + + tx_desc->read.olinfo_status = olinfo_status; + + return; +} + +static inline void sxevf_tx_desc_update( + struct sxevf_ring *ring, + union sxevf_tx_data_desc **desc, + u16 *next_to_use) +{ + ++(*next_to_use); + ++(*desc); + if (ring->depth == *next_to_use) { + *desc = SXEVF_TX_DESC(ring, 0); + *next_to_use = 0; + } + (*desc)->read.olinfo_status = 0; + + return; +} + +static void sxevf_tx_dma_err(struct sxevf_ring *ring, + struct sxevf_tx_buffer *first_buffer, + u16 next_to_use) +{ + struct sxevf_tx_buffer *tx_buffer; + + for (;;) { + tx_buffer = &ring->tx_buffer_info[next_to_use]; + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev,dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + dma_unmap_len_set(tx_buffer, len, 0); + + if (tx_buffer == first_buffer) { + break; + } + + if (next_to_use == 0) { + next_to_use += ring->depth; + } + + --next_to_use; + } + + dev_kfree_skb_any(first_buffer->skb); + first_buffer->skb = NULL; + + ring->next_to_use = next_to_use; + + return; +} + +static s32 sxevf_tx_desc_setup(struct sxevf_ring *ring, + struct sk_buff *skb, + struct sxevf_tx_buffer *first_buffer, + union sxevf_tx_data_desc **desc, + u16 *next_to_use) +{ + struct sxevf_adapter *adapter = netdev_priv(ring->netdev); + dma_addr_t dma; + skb_frag_t *frag; + u32 map_size = 
skb_headlen(skb); + u32 remaining_size = skb->data_len; + u32 cmd_type = sxevf_tx_cmd_type(skb, first_buffer->tx_features); + struct sxevf_tx_buffer *tx_buffer = first_buffer; + + LOG_DEBUG("skb dma map start, line_size=%u," + " total_frag_len=%u, skb_len=%u\n", + skb_headlen(skb), skb->data_len, skb->len); + + dma = dma_map_single(ring->dev, skb->data, map_size, DMA_TO_DEVICE); + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(ring->dev, dma)) { + LOG_DEV_ERR("tx dma map failed\n"); + goto l_dma_err; + } + dma_unmap_len_set(tx_buffer, len, map_size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + (*desc)->read.buffer_addr = cpu_to_le64(dma); + + while (unlikely(map_size > SXEVF_DATA_PER_DESC_SIZE_MAX)) { + (*desc)->read.cmd_type_len = \ + cpu_to_le32(cmd_type ^ SXEVF_DATA_PER_DESC_SIZE_MAX); + + sxevf_tx_desc_update(ring, desc, next_to_use); + dma += SXEVF_DATA_PER_DESC_SIZE_MAX; + map_size -= SXEVF_DATA_PER_DESC_SIZE_MAX; + + (*desc)->read.buffer_addr = cpu_to_le64(dma); + } + + if (likely(!remaining_size)) { + cmd_type |= map_size | SXEVF_TX_DESC_CMD; + (*desc)->read.cmd_type_len = cpu_to_le32(cmd_type); + LOG_DEBUG("skb dma map, current_map_size=%u, remaining_size=%u, " + "desc_ptr=%p, dma_addr=%#llx, desc.buffer_addr = %#llx, " + "desc.cmdtype=%#x, desc.olinfo_status=%#x\n", + map_size, remaining_size, *desc, + (U64)dma, (*desc)->read.buffer_addr, + (*desc)->read.cmd_type_len, + (*desc)->read.olinfo_status); + break; + } + + (*desc)->read.cmd_type_len = cpu_to_le32(cmd_type ^ map_size); + + LOG_DEBUG("skb dma map, current_map_size=%u, remaining_size=%u, " + "desc_ptr=%p, dma_addr=%#llx, desc.buffer_addr = %#llx, " + "desc.cmdtype=%#x, desc.olinfo_status=%#x\n", + map_size, remaining_size, *desc, + (U64)dma, (*desc)->read.buffer_addr, + (*desc)->read.cmd_type_len, + (*desc)->read.olinfo_status); + + sxevf_tx_desc_update(ring, desc, next_to_use); + + map_size = skb_frag_size(frag); + remaining_size -= map_size; + dma = skb_frag_dma_map(ring->dev, frag, 0, + map_size, DMA_TO_DEVICE); + + tx_buffer = &ring->tx_buffer_info[*next_to_use]; + } + + LOG_DEBUG("skb dma map end\n"); + return 0; +l_dma_err: + sxevf_tx_dma_err(ring, first_buffer, *next_to_use); + return -ENOMEM; +} + +static s32 sxevf_xmit_pkt(struct sxevf_ring *ring, + struct sxevf_tx_buffer *first_buffer, + const u8 hdr_len) +{ + s32 ret; + struct sk_buff *skb = first_buffer->skb; + u32 tx_features = first_buffer->tx_features; + u16 ntu = ring->next_to_use; + union sxevf_tx_data_desc *desc = SXEVF_TX_DESC(ring, ntu); + + sxevf_tx_desc_offload_setup(tx_features, skb->len - hdr_len, desc); + + ret = sxevf_tx_desc_setup(ring, skb, first_buffer, &desc, &ntu); + if (ret) { + goto l_end; + } + + first_buffer->time_stamp = jiffies; + skb_tx_timestamp(skb); + + wmb(); + + first_buffer->next_to_watch = desc; + + ntu++; + if (ntu == ring->depth) { + ntu = 0; + } + ring->next_to_use = ntu; + + if (unlikely(sxevf_desc_unused_count(ring) < SXEVF_TX_DESC_NEEDED)) { + ret = sxevf_maybe_stop_tx(ring, SXEVF_TX_DESC_NEEDED); + if (ret < 0) { + LOG_WARN("the desc is not enough in the ring[%u]," + "to stop the ring, " + "desc_cnt < SXEVF_TX_DESC_NEEDED[%ld]\n", + ring->idx, SXEVF_TX_DESC_NEEDED); + } + } + + writel(ntu, ring->desc.tail); + LOG_DEBUG("send directly, ring[%u]\n", ring->idx); + + LOG_DEBUG("tx end: ring idx=%u, next_to_use=%d, " + "next_to_clean=%d, next_to_watch=%p\n", + ring->idx, ring->next_to_use, ring->next_to_clean, + first_buffer->next_to_watch); + + return 0; + +l_end: + return ret; +} + +STATIC 
void sxevf_tx_vlan_process(struct sk_buff *skb, + struct sxevf_tx_buffer *first_buffer, + struct sxevf_tx_context_desc *ctxt_desc) +{ + u32 tx_features = 0; + + if (skb_vlan_tag_present(skb)) { + tx_features |= SXEVF_TX_FEATURE_VLAN; + sxevf_ctxt_desc_vlan_tag_set(ctxt_desc, skb_vlan_tag_get(skb)); + sxevf_ctxt_desc_maclen_set(ctxt_desc, skb_network_offset(skb)); + } + + first_buffer->tx_features = tx_features; + first_buffer->protocol = vlan_get_protocol(skb); + + return; +} + +static s32 sxevf_tx_feature_offload(struct sxevf_ring *ring, + struct sk_buff *skb, + struct sxevf_tx_buffer *first_buffer, + u8 *hdr_len) +{ + s32 ret = 0; + s32 need_tso; + struct sxevf_tx_context_desc ctxt_desc = {0}; + + sxevf_tx_vlan_process(skb, first_buffer, &ctxt_desc); + +#ifdef SXE_IPSEC_CONFIGURE + ret = sxevf_tx_ipsec_offload(ring, first_buffer, &ctxt_desc); + if (ret) { + LOG_ERROR("ring[%u] tx ipsec offload failed.(err:%d)\n", + ring->idx, ret); + goto l_end; + } +#endif + + need_tso = sxevf_tso(ring, first_buffer, &ctxt_desc, hdr_len); + if (need_tso < 0) { + LOG_ERROR("tso deal failed, ring->idx=%u\n", ring->idx); + ret = need_tso; + goto l_end; + } else if (!need_tso) { + sxevf_tx_csum_offload(ring, first_buffer, &ctxt_desc); + } + + if (first_buffer->tx_features & + (SXEVF_TX_FEATURE_VLAN | + SXEVF_TX_FEATURE_CSUM | + SXEVF_TX_FEATURE_TSO)) { + + sxevf_tx_ctxt_desc_set(ring, &ctxt_desc); + } + +l_end: + return ret; +} + +STATIC netdev_tx_t sxevf_ring_xmit(struct sk_buff *skb, + struct sxevf_ring *ring) +{ + s32 res; + u8 hdr_len = 0; + netdev_tx_t ret = NETDEV_TX_OK; + struct sxevf_tx_buffer *first_buffer; + u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL); + + if (!dst_mac || is_link_local_ether_addr(dst_mac)) { + LOG_ERROR("dst mac addr err\n"); + dev_kfree_skb_any(skb); + goto l_end; + } + + ret = sxevf_ring_maybe_stop_tx(skb, ring); + if (ret != NETDEV_TX_OK) { + LOG_ERROR("tx busy, ring idx=%u\n", ring->idx); + goto l_end; + } + + first_buffer = sxevf_tx_first_buffer_get(skb, ring); + + res = sxevf_tx_feature_offload(ring, skb, first_buffer, &hdr_len); + if (res < 0) { + LOG_ERROR("tx offload failed, ring->idx=%u\n", ring->idx); + goto l_free; + } + + if (sxevf_xmit_pkt(ring, first_buffer, hdr_len)) { + LOG_ERROR("tx dma mapping err, ring idx=%u\n", ring->idx); + } + + return NETDEV_TX_OK; + +l_free: + dev_kfree_skb_any(first_buffer->skb); + first_buffer->skb = NULL; +l_end: + return ret; +} + +netdev_tx_t sxevf_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + netdev_tx_t ret = NETDEV_TX_OK; + struct sxevf_adapter *adapter = netdev_priv(netdev); + struct sxevf_ring *tx_ring; + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + goto l_end; + } + + if (skb_put_padto(skb, SXEVF_SKB_MINI_LEN)) { + goto l_end; + } + + tx_ring = adapter->tx_ring_ctxt.ring[skb_get_queue_mapping(skb)]; + LOG_DEBUG("sxe xmit start, ring idx=%u\n", tx_ring->idx); + + SKB_DUMP(skb); + + ret = sxevf_ring_xmit(skb, tx_ring); + if (ret) { + LOG_ERROR("sxe xmit failed, ring idx=%u, status=%x\n", + tx_ring->idx, ret); + } else { + LOG_DEBUG("sxe xmit end, ring idx=%u\n", tx_ring->idx); + } + +l_end: + return ret; +} + +static inline void sxevf_tx_skb_unmap(struct sxevf_ring *ring, + struct sxevf_tx_buffer *tx_buffer) +{ + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + return; +} + +static inline void sxevf_tx_desc_buf_update( + struct sxevf_ring *ring, + struct sxevf_tx_buffer **tx_buffer, + union 
sxevf_tx_data_desc **tx_desc, + u32 *next_to_clean) +{ + (*tx_buffer)++; + (*tx_desc)++; + ++(*next_to_clean); + if (unlikely(!(*next_to_clean))) { + *next_to_clean -= ring->depth; + *tx_buffer = ring->tx_buffer_info; + *tx_desc = SXEVF_TX_DESC(ring, 0); + } + + return; +} + +STATIC void sxevf_tx_ring_unmap(struct sxevf_ring *ring, + s32 napi_budget, + u16 *budget, + struct sxevf_ring_stats *ring_stats) +{ + union sxevf_tx_data_desc *tx_desc; + union sxevf_tx_data_desc *eop_desc; + u32 next_to_clean = ring->next_to_clean; + struct sxevf_tx_buffer *tx_buffer; + + tx_buffer = &ring->tx_buffer_info[next_to_clean]; + tx_desc = SXEVF_TX_DESC(ring, next_to_clean); + next_to_clean -= ring->depth; + + LOG_DEBUG("tx ring clean start: ring idx=%u, ring_reg_idx=%u, " "next_to_use=%d, next_to_clean=%d, budget=%d, " + "next_to_watch=%p, " + "desc.wb.nxtseq_seed=%#08x, desc.wb.status=%#08x\n", + ring->idx, ring->reg_idx, ring->next_to_use, ring->next_to_clean, + *budget, tx_buffer->next_to_watch, + tx_desc->wb.nxtseq_seed, tx_desc->wb.status); + + do { + eop_desc = tx_buffer->next_to_watch; + if (!eop_desc) { + break; + } + + smp_rmb(); + + if (!(eop_desc->wb.status & cpu_to_le32(SXEVF_TX_DESC_STAT_DD))) { + break; + } + + tx_buffer->next_to_watch = NULL; + + ring_stats->bytes += tx_buffer->bytecount; + ring_stats->packets += tx_buffer->gso_segs; + +#ifdef HAVE_XDP_SUPPORT + if (vf_ring_is_xdp(ring)) { + page_frag_free(tx_buffer->data); + } else { + napi_consume_skb(tx_buffer->skb, napi_budget); + } +#else + napi_consume_skb(tx_buffer->skb, napi_budget); +#endif + LOG_DEBUG("tx ring clean: budget=%d, bytes=%llu, packet=%llu\n", + *budget, ring_stats->bytes, ring_stats->packets); + + sxevf_tx_skb_unmap(ring, tx_buffer); + while (tx_desc != eop_desc) { + sxevf_tx_desc_buf_update(ring, &tx_buffer, + &tx_desc, &next_to_clean); + + if (dma_unmap_len(tx_buffer, len)) { + sxevf_tx_skb_unmap(ring, tx_buffer); + } + } + sxevf_tx_desc_buf_update(ring, &tx_buffer, + &tx_desc, &next_to_clean); + + prefetch(tx_desc); + + --*budget; + }while (likely(*budget)); + + next_to_clean += ring->depth; + ring->next_to_clean = next_to_clean; + LOG_DEBUG("tx ring clean end: ring idx=%u, next_to_use=%d, " + "next_to_clean=%d, budget=%d\n", + ring->idx, ring->next_to_use, ring->next_to_clean, *budget); + + return; +} + +static u32 sxevf_hw_tx_ring_pending_get(struct sxevf_ring *ring) +{ + u32 ret = 0; + u32 head, tail; + struct sxevf_adapter *adapter = netdev_priv(ring->netdev); + struct sxevf_hw *hw = &adapter->hw; + + hw->dma.ops->tx_ring_info_get(hw, ring->idx, &head, &tail); + + if (head != tail) { + ret = (head < tail) ? 
+ (tail - head) : (tail + ring->depth - head); + } + + return ret; +} + +static inline bool sxevf_detect_tx_hang(struct sxevf_ring *ring) +{ + bool ret; + u32 tx_done = ring->stats.packets; + u32 tx_done_old = ring->tx_stats.tx_done_old; + u32 tx_pending = sxevf_hw_tx_ring_pending_get(ring); + + SXEVF_TX_HANG_CHECK_COMPLETE(ring); + + if (tx_done_old == tx_done && tx_pending) { + LOG_INFO("ring[%u] hang \n", ring->idx); + ret = test_and_set_bit(SXEVF_HANG_CHECK_ARMED, &ring->state); + goto l_end; + } + + ring->tx_stats.tx_done_old = tx_done; + + clear_bit(SXEVF_HANG_CHECK_ARMED, &ring->state); + + ret = false; +l_end: + return ret; +} + +static void sxevf_tx_timeout_reset(struct sxevf_adapter *adapter) +{ + if (!test_bit(SXEVF_DOWN, &adapter->state)) { + set_bit(SXEVF_RESET_REQUESTED, &adapter->monitor_ctxt.state); + sxevf_monitor_work_schedule(adapter); + LOG_INFO("tx timeout reset adapter\n"); + } + + return; +} + +#ifdef HAVE_TIMEOUT_TXQUEUE_IDX +void sxevf_tx_timeout(struct net_device *netdev, u32 __always_unused txqueue) +#else +void sxevf_tx_timeout(struct net_device *netdev) +#endif +{ + struct sxevf_adapter *adapter = netdev_priv(netdev); + + sxevf_tx_timeout_reset(adapter); + + return; +} + +static inline bool sxevf_tx_hang_handle(struct sxevf_adapter *adapter, + struct sxevf_ring *ring) +{ + u32 tdh, tdt; + bool ret = false; + struct sxevf_hw *hw = &adapter->hw; + + if (SXEVF_DETECT_TX_HANG_NEED(ring) && sxevf_detect_tx_hang(ring)) { +#ifdef HAVE_XDP_SUPPORT + if (!vf_ring_is_xdp(ring)) +#endif + netif_stop_subqueue(ring->netdev, ring->idx); + + hw->dma.ops->tx_ring_info_get(hw, ring->reg_idx, &tdh, &tdt); + LOG_DEV_ERR("detected Tx hang %s\n" + " Tx ring <%u>\n" + " ring reg <%u>\n" + " TDH, TDT <%u>, <%u>\n" + " next_to_use <%u>\n" + " next_to_clean <%u>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%llx>\n", +#ifdef HAVE_XDP_SUPPORT + vf_ring_is_xdp(ring) ? 
" XDP" : "", +#else + "", +#endif + ring->idx, ring->reg_idx, + tdh, tdt, + ring->next_to_use, ring->next_to_clean, + ring->tx_buffer_info[ring->next_to_clean].time_stamp, + (U64)jiffies); + + sxevf_tx_timeout_reset(adapter); + + ret = true; + } + + return ret; +} + +static inline void sxevf_tx_pkt_stats_update(struct sxevf_irq_rate *irq_rate, + struct sxevf_ring *ring, + struct sxevf_ring_stats *stats) +{ + u64_stats_update_begin(&ring->syncp); + ring->stats.bytes += stats->bytes; + ring->stats.packets += stats->packets; + u64_stats_update_end(&ring->syncp); + + irq_rate->total_bytes += stats->bytes; + irq_rate->total_packets += stats->packets; + + return; +} + +bool sxevf_tx_ring_irq_clean(struct sxevf_irq_data *irq, + struct sxevf_ring *ring, s32 napi_budget) +{ + bool ret; + struct sxevf_ring_stats ring_stats = {}; + u16 budget = ring->depth / 2; + struct sxevf_adapter *adapter = irq->adapter; + + if (test_bit(SXEVF_DOWN, &adapter->state)) { + ret = true; + goto l_end; + } + + sxevf_tx_ring_unmap(ring, napi_budget, &budget, &ring_stats); + + sxevf_tx_pkt_stats_update(&irq->tx.irq_rate, ring, &ring_stats); + + if (sxevf_tx_hang_handle(adapter, ring)) { + ret = true; + goto l_end; + } + +#ifdef HAVE_XDP_SUPPORT + if (vf_ring_is_xdp(ring)) { + LOG_INFO("xdp ring[%u] xmit finish ,return\n", ring->idx); + ret = !!budget; + goto l_end; + } +#endif + + if (unlikely(ring_stats.packets && + netif_carrier_ok(ring->netdev) && + (sxevf_desc_unused_count(ring) >= \ + SXEVF_TX_WAKE_THRESHOLD))) { + + smp_mb(); + if (__netif_subqueue_stopped(ring->netdev, ring->idx) && \ + !test_bit(SXEVF_DOWN, &adapter->state)) { + netif_wake_subqueue(ring->netdev, ring->idx); + ++ring->tx_stats.restart_queue; + LOG_DEBUG("\n\n ring idx=%u, wake_up\n\n", ring->idx); + } + } + + ret = !!budget; + +l_end: + return ret; +} + +bool sxevf_xdp_ring_irq_clean(struct sxevf_irq_data *irq, + struct sxevf_ring *xdp_ring, s32 napi_budget) +{ + return sxevf_tx_ring_irq_clean(irq, xdp_ring, napi_budget); + +} + +void sxevf_hw_tx_disable(struct sxevf_adapter *adapter) +{ + u32 i; + u8 reg_idx; + struct sxevf_hw *hw = &adapter->hw; + + for (i = 0; i < adapter->tx_ring_ctxt.num; i++) { + reg_idx = adapter->tx_ring_ctxt.ring[i]->reg_idx; + + hw->dma.ops->tx_ring_switch(hw, reg_idx, false); + } + + for (i = 0; i < adapter->xdp_ring_ctxt.num; i++) { + reg_idx = adapter->xdp_ring_ctxt.ring[i]->reg_idx; + + hw->dma.ops->tx_ring_switch(hw, reg_idx, false); + } + + return; +} diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_tx_proc.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_tx_proc.h new file mode 100644 index 000000000000..5ed8990371d3 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_tx_proc.h @@ -0,0 +1,118 @@ +#ifndef __SXEVF_TX_PROC_H__ +#define __SXEVF_TX_PROC_H__ + +#include "sxevf.h" +#include "sxevf_ring.h" + +#define SXEVF_TX_CTXTD_TUCMD_IPV4 0x00000400 +#define SXEVF_TX_CTXTD_TUCMD_L4T_UDP 0x00000000 +#define SXEVF_TX_CTXTD_TUCMD_L4T_TCP 0x00000800 +#define SXEVF_TX_CTXTD_TUCMD_L4T_SCTP 0x00001000 +#define SXEVF_TX_CTXTD_TUCMD_IPSEC_TYPE_ESP 0x00002000 +#define SXEVF_TX_CTXTD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 + +#define SXEVF_TX_CTXTD_L4LEN_SHIFT 8 +#define SXEVF_TX_CTXTD_MSS_SHIFT 16 +#define SXEVF_TX_CTXTD_MACLEN_SHIFT 9 +#define SXEVF_TX_CTXTD_VLAN_MASK 0xffff0000 + +enum sxevf_tx_features { + SXEVF_TX_FEATURE_CSUM = BIT(0), + SXEVF_TX_FEATURE_VLAN = BIT(1), + SXEVF_TX_FEATURE_TSO = BIT(2), + SXEVF_TX_FEATURE_IPV4 = BIT(3), + SXEVF_TX_FEATURE_IPSEC = BIT(4), +}; + +union sxevf_ip_hdr { + 
struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; +union sxevf_l4_hdr { + struct tcphdr *tcp; + unsigned char *hdr; +}; + +#ifdef HAVE_TIMEOUT_TXQUEUE_IDX +void sxevf_tx_timeout(struct net_device *netdev, u32 __always_unused txqueue); +#else +void sxevf_tx_timeout(struct net_device *netdev); +#endif + +s32 sxevf_tx_ring_depth_reset(struct sxevf_adapter *adapter, u32 tx_cnt); + +void sxevf_hw_tx_disable(struct sxevf_adapter *adapter); + +void sxevf_hw_tx_configure(struct sxevf_adapter *adapter); + +s32 sxevf_tx_configure(struct sxevf_adapter *adapter); + +void sxevf_tx_ring_buffer_clean(struct sxevf_ring *ring); + +void sxevf_tx_resources_free(struct sxevf_adapter *adapter); + +bool sxevf_tx_ring_irq_clean(struct sxevf_irq_data *irq, + struct sxevf_ring *ring, s32 napi_budget); + +bool sxevf_xdp_ring_irq_clean(struct sxevf_irq_data *irq, + struct sxevf_ring *xdp_ring, s32 napi_budget); + +netdev_tx_t sxevf_xmit(struct sk_buff *skb, struct net_device *netdev); + +static inline void sxevf_ctxt_desc_iplen_set( + struct sxevf_tx_context_desc *ctxt_desc, u32 iplen) +{ + ctxt_desc->vlan_macip_lens |= iplen; + return; +} + +static inline void sxevf_ctxt_desc_maclen_set( + struct sxevf_tx_context_desc *ctxt_desc, u32 maclen) +{ + ctxt_desc->vlan_macip_lens &= ~SXEVF_TX_CTXTD_MACLEN_MASK; + ctxt_desc->vlan_macip_lens |= maclen << SXEVF_TX_CTXTD_MACLEN_SHIFT; + return; +} + +static inline void sxevf_ctxt_desc_vlan_tag_set( + struct sxevf_tx_context_desc *ctxt_desc, u32 vlan_tag) +{ + ctxt_desc->vlan_macip_lens |= vlan_tag << SXEVF_TX_CTXTD_VLAN_SHIFT; + return; +} + +static inline void sxevf_ctxt_desc_sa_idx_set( + struct sxevf_tx_context_desc *ctxt_desc, u32 sa_idx) +{ + ctxt_desc->sa_idx = sa_idx; + return; +} + +static inline void sxevf_ctxt_desc_tucmd_set( + struct sxevf_tx_context_desc *ctxt_desc, u32 tucmd) +{ + ctxt_desc->type_tucmd_mlhl |= tucmd; + return; +} + +static inline void sxevf_ctxt_desc_mss_l4len_set( + struct sxevf_tx_context_desc *ctxt_desc, u32 mss_l4len) +{ + ctxt_desc->mss_l4len_idx = mss_l4len; + return; +} + +static inline __be16 sxevf_ctxt_desc_vlan_tag_get( + struct sxevf_tx_context_desc *ctxt_desc) +{ + return (ctxt_desc->vlan_macip_lens >> SXEVF_TX_CTXTD_VLAN_SHIFT); +} + +static inline void sxevf_tx_release(struct sxevf_adapter *adapter) +{ + sxevf_tx_resources_free(adapter); + + return; +} +#endif \ No newline at end of file diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_xdp.c b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_xdp.c new file mode 100644 index 000000000000..1ea932672d29 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_xdp.c @@ -0,0 +1,225 @@ +#include "sxevf_xdp.h" +#include "sxe_compat.h" + +#ifdef HAVE_XDP_SUPPORT +#include "sxevf_rx_proc.h" +#include "sxevf_netdev.h" +#include "sxevf_tx_proc.h" + +#ifndef BPF_WARN_INVALID_XDP_ACTION_API_NEED_3_PARAMS +static inline void +bpf_warn_invalid_xdp_action_compat(__maybe_unused struct net_device *dev, + __maybe_unused struct bpf_prog *prog, u32 act) +{ + bpf_warn_invalid_xdp_action(act); +} + +#define bpf_warn_invalid_xdp_action(dev, prog, act) \ + bpf_warn_invalid_xdp_action_compat(dev, prog, act) + +#endif + +static s32 sxevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog) +{ + s32 ret = 0; + u32 i, frame_size = dev->mtu + SXEVF_ETH_DEAD_LOAD; + struct sxevf_adapter *adapter = netdev_priv(dev); + struct bpf_prog *old_prog; + + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + struct sxevf_ring *ring = adapter->rx_ring_ctxt.ring[i]; + + if 
(frame_size > sxevf_rx_bufsz(ring)) { + ret = -EINVAL; + LOG_ERROR_BDF("frame size[%u] too large for ring[%u]," + " buffer size=%u\n", frame_size, ring->idx, + sxevf_rx_bufsz(ring)); + goto l_ret; + } + } + + old_prog = xchg(&adapter->xdp_prog, prog); + + if (!!prog != !!old_prog) { + LOG_DEBUG_BDF("xdp prog changed from %s to %s\n", + old_prog ? "exist" : "empty", prog ? "exist" : "empty"); + if (netif_running(dev)) { + sxevf_close(dev); + } + + sxevf_ring_irq_exit(adapter); + + sxevf_ring_irq_init(adapter); + + if (netif_running(dev)) { + sxevf_open(dev); + } + } else { + LOG_DEBUG_BDF("xdp prog changed from %p to %p\n", old_prog, prog); + for (i = 0; i < adapter->rx_ring_ctxt.num; i++) { + xchg(&adapter->rx_ring_ctxt.ring[i]->xdp_prog, + adapter->xdp_prog); + } + } + + if (old_prog) { + bpf_prog_put(old_prog); + } + +l_ret: + return ret; +} + +s32 sxevf_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + s32 ret = 0; + struct sxevf_adapter *adapter = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + LOG_DEBUG_BDF("xdp command setup, prog=%p\n", xdp->prog); + ret = sxevf_xdp_setup(dev, xdp->prog); + break; +#ifdef HAVE_XDP_QUERY_PROG + case XDP_QUERY_PROG: + xdp->prog_id = adapter->xdp_prog ? + adapter->xdp_prog->aux->id : 0; + LOG_DEBUG_BDF("xdp command query, prog_id=%u\n", xdp->prog_id); + break; +#endif + default: + LOG_DEBUG_BDF("invalid xdp command = 0x%x\n", xdp->command); + ret = -EINVAL; + } + + return ret; +} + +static s32 sxevf_xdp_ring_xmit(struct sxevf_ring *ring, + struct xdp_buff *xdp) +{ + s32 ret = SXEVF_XDP_TX; + struct sxevf_tx_buffer *tx_buffer; + union sxevf_tx_data_desc *tx_desc; + struct sxevf_tx_context_desc *context_desc; + u32 len, cmd_type; + dma_addr_t dma; + u16 i; + struct sxevf_adapter *adapter = netdev_priv(ring->netdev); + + len = xdp->data_end - xdp->data; + LOG_DEBUG_BDF("xdp ring[%u] xmit, len=%u\n", ring->idx, len); + + if (unlikely(!sxevf_desc_unused(ring))) { + LOG_ERROR_BDF("ring[%u] no unused desc, ring full\n", ring->idx); + ret = SXEVF_XDP_CONSUMED; + goto l_ret; + } + + dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(ring->dev, dma)) { + LOG_ERROR_BDF("ring[%u] dma mapping error\n", ring->idx); + ret = SXEVF_XDP_CONSUMED; + goto l_ret; + } + + i = ring->next_to_use; + tx_buffer = &ring->tx_buffer_info[i]; + + dma_unmap_len_set(tx_buffer, len, len); + dma_unmap_addr_set(tx_buffer, dma, dma); + tx_buffer->data = xdp->data; + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + if (!test_bit(SXEVF_TX_XDP_RING_PRIMED, &ring->state)) { + LOG_DEBUG_BDF("ring[%u] xdp ring not primed, priming it\n", ring->idx); + + set_bit(SXEVF_TX_XDP_RING_PRIMED, &ring->state); + + context_desc = SXEVF_TX_CTXTDESC(ring, 0); + context_desc->vlan_macip_lens = + cpu_to_le32(ETH_HLEN << SXEVF_TX_CTXTD_MACLEN_SHIFT); + context_desc->type_tucmd_mlhl = + cpu_to_le32(SXEVF_TXD_DTYP_CTXT); + context_desc->mss_l4len_idx = 0; + + i = 1; + } + + cmd_type = SXEVF_TX_DESC_TYPE_DATA | + SXEVF_TX_DESC_DEXT | + SXEVF_TX_DESC_IFCS; + cmd_type |= len | SXEVF_TX_DESC_CMD; + + tx_desc = SXEVF_TX_DESC(ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); + tx_desc->read.olinfo_status = + cpu_to_le32((len << SXEVF_TX_DESC_PAYLEN_SHIFT) | + SXEVF_TXD_CC); + + smp_wmb(); + + i++; + if (i == ring->depth) { + i = 0; + } + + tx_buffer->next_to_watch = tx_desc; + ring->next_to_use = i; + +l_ret: + return ret; +} +
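Note on the ring-space check in sxevf_xdp_ring_xmit() above: the function gives up with SXEVF_XDP_CONSUMED as soon as sxevf_desc_unused() reports no free slots. The helper below is a minimal sketch, not part of the patch, of the free-slot arithmetic such a check is normally assumed to rely on (next_to_use/next_to_clean accounting that keeps one descriptor slot permanently empty); the function name is hypothetical.

#include <linux/types.h>

/* Sketch only: assumed free-slot math for a descriptor ring of size 'depth'. */
static inline u16 sxevf_desc_unused_sketch(u16 ntc, u16 ntu, u16 depth)
{
	/* When next_to_clean sits at or behind next_to_use, add one full wrap. */
	return ((ntc > ntu) ? 0 : depth) + ntc - ntu - 1;
}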
+struct sk_buff *sxevf_xdp_run(struct sxevf_adapter *adapter, + struct sxevf_ring *rx_ring, + struct xdp_buff *xdp) +{ + int result = SXEVF_XDP_PASS; + struct sxevf_ring *xdp_ring; + struct bpf_prog *xdp_prog; + u32 act; + + rcu_read_lock(); + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + if (!xdp_prog) { + LOG_INFO_BDF("rx_ring[%u] xdp prog is NULL\n", rx_ring->idx); + goto xdp_out; + } + + act = bpf_prog_run_xdp(xdp_prog, xdp); + LOG_DEBUG_BDF("rx_ring[%u] xdp run result=0x%x\n", rx_ring->idx, act); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdp_ring = adapter->xdp_ring_ctxt.ring[rx_ring->idx]; + result = sxevf_xdp_ring_xmit(xdp_ring, xdp); + break; + default: + bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: + result = SXEVF_XDP_CONSUMED; + break; + } +xdp_out: + rcu_read_unlock(); + return ERR_PTR(-result); +} +#else +struct sk_buff *sxevf_xdp_run(struct sxevf_adapter *adapter, + struct sxevf_ring *rx_ring, + struct xdp_buff *xdp) +{ + return ERR_PTR(-SXEVF_XDP_PASS); +} +#endif diff --git a/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_xdp.h b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_xdp.h new file mode 100644 index 000000000000..c0b4683702c6 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/sxevf/sxevf_xdp.h @@ -0,0 +1,31 @@ + +#ifndef __SXEVF_XDP_H__ +#define __SXEVF_XDP_H__ + +#include "sxevf.h" +#ifdef HAVE_XDP_SUPPORT +#include <linux/bpf.h> +#include <linux/bpf_trace.h> +#endif + +#ifdef HAVE_AF_XDP_ZERO_COPY +#ifdef HAVE_MEM_TYPE_XSK_BUFF_POOL +#include <net/xdp_sock_drv.h> +#else +#include <net/xdp_sock.h> +#endif +#endif + +#define SXEVF_XDP_PASS 0 +#define SXEVF_XDP_CONSUMED 1 +#define SXEVF_XDP_TX 2 + +#ifdef HAVE_XDP_SUPPORT +s32 sxevf_xdp(struct net_device *dev, struct netdev_bpf *xdp); + +#endif +struct sk_buff *sxevf_xdp_run(struct sxevf_adapter *adapter, + struct sxevf_ring *rx_ring, + struct xdp_buff *xdp); + +#endif
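A note on the return convention declared in sxevf_xdp.h: sxevf_xdp_run() reports its verdict as ERR_PTR(-verdict). Because SXEVF_XDP_PASS is 0, a pass comes back as ERR_PTR(0), i.e. NULL, while SXEVF_XDP_TX and SXEVF_XDP_CONSUMED come back as genuine error pointers. The helper below is a minimal sketch, not part of the patch, of how an Rx clean loop might decode that encoding; the helper name is hypothetical.

#include <linux/err.h>
#include "sxevf_xdp.h"

/* Sketch only: decoding the ERR_PTR(-verdict) value from sxevf_xdp_run(). */
static inline bool sxevf_xdp_verdict_is_tx(struct sk_buff *skb)
{
	/* NULL means XDP_PASS (SXEVF_XDP_PASS == 0): hand the frame to the stack. */
	return IS_ERR(skb) && PTR_ERR(skb) == -SXEVF_XDP_TX;
}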
diff --git a/drivers/net/ethernet/linkdata/sxevf/vercode_build.sh b/drivers/net/ethernet/linkdata/sxevf/vercode_build.sh new file mode 100644 index 000000000000..6022874ecf54 --- /dev/null +++ b/drivers/net/ethernet/linkdata/sxevf/vercode_build.sh @@ -0,0 +1,91 @@ +#!/bin/bash + +# Kernel tree path passed in as the first argument +kernel_path=$1 + +############################################### +# Compatibility tables: entries in each LIST match the MACRO array one-to-one +# To support another OS, add a new LIST and its matching MACRO array +############################################### +# NFS +NFS_LIST=("4.19.113-14.1.nfs4.x86_64" "4.19.113-40.nfs4.x86_64") +NFS_MACRO=("NFS_4_0_0613" "NFS_4_0_0612") +# UOS +UOS_LIST=("4.19.90-2201.4.0.0135.up1.uel20.x86_64" "4.19.90-2305.1.0.0199.56.uel20.x86_64" \ + "5.10.0-46.uel20.x86_64" "4.19.90-2403.3.0.0270.84.uel20.x86_64" "5.10.0-74.uel20.x86_64") +UOS_MACRO=("UOS_1050" "UOS_1060_4_19" "UOS_1060_5_10" "UOS_1070_4_19" "UOS_1070_5_10") +# kylin linux +KYLIN_LIST=("4.19.90-24.4.v2101.ky10.x86_64" "4.19.90-vhulk2001.1.0.0026.ns7.15.x86_64" \ + "4.19.90-21.2.9.wa.x86_64") +KYLIN_MACRO=("KYLIN_10_SP2" "KYLIN_0429" "KYLIN_0721") +# anolis +ANOLIS_LIST=("5.10.134-13.an8.x86_64") +ANOLIS_MACRO=("ANOLIS_8_8") +# openeuler +EULER_LIST=("5.10.0-60.18.0.50.oe2203.x86_64") +EULER_MACRO=("EULER_2203_LTS") +# bc-linux +BCLINUX_LIST=("4.19.0-240.23.11.el8_2.bclinux.x86_64" "5.10.0-200.el8_2.bclinux.x86_64") +BCLINUX_MACRO=("BCLINUX_8_2_4_19" "BCLINUX_8_2_5_10") +# culinux +CULINUX_LIST=("5.10.0-60.67.0.107.ule3.x86_64") +CULINUX_MACRO=("CULINUX_3_0") + +KERNEL_LIST=(NFS_LIST UOS_LIST KYLIN_LIST ANOLIS_LIST EULER_LIST BCLINUX_LIST CULINUX_LIST) +MACRO_LIST=(NFS_MACRO UOS_MACRO KYLIN_MACRO ANOLIS_MACRO EULER_MACRO BCLINUX_MACRO CULINUX_MACRO) + +############################################### +# Return the index of an element in an array, or -1 if it is not found +############################################### +function getArrItemIdx(){ + local arr=$1 + local item=$2 + local index=0 + + for i in ${arr[*]}; do + if [[ $item == $i ]] + then + echo $index + return + fi + index=$(($index + 1)) + done + + echo -1 + return +} + +############################################### +# Get the version of the kernel the driver is being built against +############################################### +function getKernelVersion(){ + local uts_h="/include/generated/utsrelease.h" + version_path=$1$uts_h + if [ ! -f $version_path ]; then + return + fi + cat $version_path | grep UTS_RELEASE | awk '{ print $3 }' | sed 's/\"//g' + return +} + +############################################## +# main: print the macro matching the current kernel version +############################################## +function main(){ + local build_kernel=$(getKernelVersion $kernel_path) + local row=0 + for OS_TYPE in ${KERNEL_LIST[*]}; do + kernel_tmp=$OS_TYPE[*] + macro_tmp=${MACRO_LIST[row]}[*] + KERNELS=(${!kernel_tmp}) + MACROS=(${!macro_tmp}) + col=$(getArrItemIdx "${KERNELS[*]}" $build_kernel) + if [ $col != -1 ]; then + echo ${MACROS[col]} + return + fi + row=$(($row + 1)) + done +} + +main \ No newline at end of file
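vercode_build.sh prints at most one macro name for the kernel found in the tree it is given (for example KYLIN_10_SP2 or EULER_2203_LTS) and prints nothing for an unrecognized kernel. Assuming the sxevf Makefile forwards that name to the compiler as a preprocessor define, the compat headers can key distribution-specific feature selection off it. The block below is only an illustrative sketch of that pattern: the feature macros are ones used elsewhere in this patch, but the pairing shown is hypothetical, not the driver's actual per-distro table.

/* Sketch only: a hypothetical per-distro block keyed off the script's output. */
#ifdef KYLIN_10_SP2
#define HAVE_XDP_SUPPORT
#define HAVE_TIMEOUT_TXQUEUE_IDX
#endif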