From fa4d9bb6936e3ea2674e173fc9fb9e76748eeed3 Mon Sep 17 00:00:00 2001 From: Dong Yibo Date: Thu, 4 Dec 2025 16:25:57 +0800 Subject: [PATCH] RNPGBE: NET: Update driver to 1.0.0 driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQJF CVE: NA -------------------------------- Update rnpgbe driver to 1.0.0, main changes below: 1. Fix ocp vlan bug. 2. Add n210L support. 3. Add crc check for firmware download. 4. Add debug show in /sys/kernel/rnpgbe. 5. Rename some funcs/regs name. 6. Fix rss-off bug. 7. Fix build warnings with W=1 C=1. 8. Fix crash when use mucse_gbe_tools. Fixes: eba3cbcfeafe ("drivers: initial support for rnpgbe drivers from Mucse Technology") Signed-off-by: Dong Yibo --- drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 61 +- .../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c | 2010 ++++++++++------- .../net/ethernet/mucse/rnpgbe/rnpgbe_common.h | 22 +- .../ethernet/mucse/rnpgbe/rnpgbe_debugfs.c | 97 +- .../ethernet/mucse/rnpgbe/rnpgbe_ethtool.c | 124 +- .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c | 12 +- .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 1117 +++++---- .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c | 63 +- .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h | 2 +- .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c | 314 ++- .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h | 41 +- .../net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c | 20 +- .../net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h | 1 - .../net/ethernet/mucse/rnpgbe/rnpgbe_regs.h | 419 ++-- .../net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c | 12 + .../net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h | 3 +- .../net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c | 135 +- .../net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c | 275 ++- .../net/ethernet/mucse/rnpgbe/rnpgbe_type.h | 51 +- drivers/net/ethernet/mucse/rnpgbe/version.h | 2 +- 20 files changed, 2873 insertions(+), 1908 deletions(-) diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h index a646c4fbc83e..b75fe83035b6 100644 
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -26,6 +26,7 @@ extern struct rnpgbe_info rnpgbe_n500_info; extern struct rnpgbe_info rnpgbe_n210_info; +extern struct rnpgbe_info rnpgbe_n210L_info; /* common prefix used by pr_<> macros */ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -70,6 +71,7 @@ extern struct rnpgbe_info rnpgbe_n210_info; #define ACTION_TO_MPE (130) #define MPE_PORT (10) #define AUTO_ALL_MODES 0 +#define CHECK_DATA (0xaabc) /* TX/RX descriptor defines */ #ifdef FEITENG #define RNP_DEFAULT_TXD 4096 @@ -94,10 +96,8 @@ extern struct rnpgbe_info rnpgbe_n210_info; #define RNP_MIN_FCRTH 0x600 #define RNP_MAX_FCRTH 0x7FFF0 #define RNP_DEFAULT_FCPAUSE 0xFFFF -#define RNP10_DEFAULT_HIGH_WATER 0x320 -#define RNP10_DEFAULT_LOW_WATER 0x270 -#define RNP500_DEFAULT_HIGH_WATER 400 -#define RNP500_DEFAULT_LOW_WATER 256 +#define RNPGBE_DEFAULT_HIGH_WATER 400 +#define RNPGBE_DEFAULT_LOW_WATER 256 #define RNP_MIN_FCPAUSE 0 #define RNP_MAX_FCPAUSE 0xFFFF @@ -196,6 +196,7 @@ struct vf_data_storage { u16 default_vf_vlan_id; u16 vlans_enabled; bool clear_to_send; + bool get_mtu_done; bool pf_set_mac; u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ u16 vf_vlan; // vf just can set 1 vlan @@ -242,7 +243,7 @@ struct rnpgbe_tx_buffer { bool gso_need_padding; __be16 protocol; - __be16 priv_tags; + int priv_tags; DEFINE_DMA_UNMAP_ADDR(dma); DEFINE_DMA_UNMAP_LEN(len); union { @@ -318,6 +319,7 @@ struct rnpgbe_rx_queue_stats { u64 rx_equal_count; u64 rx_clean_times; u64 rx_clean_count; + u64 rx_resync; }; enum rnpgbe_ring_state_t { @@ -331,6 +333,14 @@ enum rnpgbe_ring_state_t { __RNP_RX_FCOE, }; +enum { + PART_FW, + PART_CFG, + PART_MACSN, + PART_PCSPHY, + PART_PXE, +}; + #define ring_uses_build_skb(ring) \ test_bit(__RNP_RX_BUILD_SKB_ENABLED, &(ring)->state) @@ -372,6 +382,7 @@ struct rnpgbe_ring { #define RNP_RING_IRQ_MISS_FIX ((u32)(1 << 10)) #define RNP_RING_OUTER_VLAN_FIX ((u32)(1 << 11)) #define RNP_RING_CHKSM_FIX ((u32)(1 << 12)) +#define RNP_RING_LOWER_ITR ((u32)(1 << 13)) u8 pfvfnum; u16 count; /* amount of descriptors */ @@ -453,6 +464,9 @@ static inline unsigned int rnpgbe_rx_pg_order(struct rnpgbe_ring *ring) } #define rnpgbe_rx_pg_size(_ring) (PAGE_SIZE << rnpgbe_rx_pg_order(_ring)) +#define DEFAULT_ADV (RNP_LINK_SPEED_1GB_FULL | RNP_LINK_SPEED_100_FULL | \ + RNP_LINK_SPEED_10_FULL | RNP_LINK_SPEED_10_HALF | \ + RNP_LINK_SPEED_100_HALF) struct rnpgbe_ring_container { struct rnpgbe_ring *ring; /* pointer to linked list of rings */ @@ -542,8 +556,8 @@ struct hwmon_attr { struct hwmon_buff { struct attribute_group group; const struct attribute_group *groups[2]; - struct attribute *attrs[RNP_MAX_SENSORS * 4 + 1]; - struct hwmon_attr hwmon_list[RNP_MAX_SENSORS * 4]; + struct attribute *attrs[RNPGBE_MAX_SENSORS * 4 + 1]; + struct hwmon_attr hwmon_list[RNPGBE_MAX_SENSORS * 4]; unsigned int n_hwmon; }; #endif /* RNPM_HWMON */ @@ -574,7 +588,7 @@ static inline u16 rnpgbe_desc_unused_rx(struct rnpgbe_ring *ring) u16 ntc = ring->next_to_clean; u16 ntu = ring->next_to_use; - return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 16; } #define RNP_RX_DESC(R, i) (&(((union rnpgbe_rx_desc *)((R)->desc))[i])) @@ -583,7 +597,7 @@ static inline u16 rnpgbe_desc_unused_rx(struct rnpgbe_ring *ring) #define RNP_MAX_JUMBO_FRAME_SIZE 9590 /* Maximum Supported Size 9.5KB */ #define RNP_MIN_MTU 68 -#define RNP500_MAX_JUMBO_FRAME_SIZE 9722 /* Maximum Supported Size 9728 */ +#define RNPGBE_MAX_JUMBO_FRAME_SIZE 9722 /* Maximum Supported Size 9728 */ #define OTHER_VECTOR 1 #define NON_Q_VECTORS (OTHER_VECTOR) @@ -777,6 +791,7 @@ struct rnpgbe_adapter { #define RNP_FLAG2_RESET_PF ((u32)(1 << 15)) #define RNP_FLAG2_CHKSM_FIX ((u32)(1 << 16)) #define RNP_FLAG2_INSMOD ((u32)(1 << 17)) +#define RNP_FLAG2_NO_NET_REG ((u32)(1 << 18)) u32 priv_flags; #define RNP_PRIV_FLAG_MAC_LOOPBACK BIT(0) @@ -971,6 +986,7 @@ struct rnpgbe_adapter { #ifdef RNP_SYSFS #ifdef RNPGBE_HWMON struct hwmon_buff *rnpgbe_hwmon_buff; + struct device *hwmon_dev; #endif /* RNPGBE_HWMON */ #endif /* RNPM_SYSFS */ #ifdef CONFIG_DEBUG_FS @@ -1004,6 +1020,14 @@ struct rnpgbe_fdir_filter { u64 action; }; +struct crc32_info { + u32 magic; + u32 crc32; +}; + +#define CRC_OFFSET (504) +#define CRC32_MAGIC (0x43524332) + enum rnpgbe_state_t { __RNP_TESTING, __RNP_RESETTING, @@ -1039,11 +1063,13 @@ enum rnpgbe_boards { board_n20, board_n500, board_n210, + board_n210L, }; extern char rnpgbe_driver_name[]; extern const char rnpgbe_driver_version[]; +void rnpgbe_reinit_locked(struct rnpgbe_adapter *adapter); extern void rnpgbe_up(struct rnpgbe_adapter *adapter); extern void rnpgbe_down(struct rnpgbe_adapter *adapter); extern void rnpgbe_reset(struct rnpgbe_adapter *adapter); @@ -1061,8 +1087,6 @@ extern void rnpgbe_disable_rx_queue(struct rnpgbe_adapter *adapter, struct rnpgbe_ring *ring); extern void rnpgbe_update_stats(struct rnpgbe_adapter *adapter); extern int rnpgbe_init_interrupt_scheme(struct rnpgbe_adapter *adapter); -extern int rnpgbe_set_interrupt_capability(struct rnpgbe_adapter *adapter); -extern void 
rnpgbe_reset_interrupt_capability(struct rnpgbe_adapter *adapter); extern int rnpgbe_wol_supported(struct rnpgbe_adapter *adapter, u16 device_id); extern void rnpgbe_clear_interrupt_scheme(struct rnpgbe_adapter *adapter); extern netdev_tx_t rnpgbe_xmit_frame_ring(struct sk_buff *sk_buff, @@ -1071,7 +1095,6 @@ extern netdev_tx_t rnpgbe_xmit_frame_ring(struct sk_buff *sk_buff, extern void rnpgbe_unmap_and_free_tx_resource(struct rnpgbe_ring *ring, struct rnpgbe_tx_buffer *tx_buffer_info); -extern void rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *ring, u16 count); extern int rnpgbe_poll(struct napi_struct *napi, int budget); extern int ethtool_ioctl(struct ifreq *ifr); extern s32 rnpgbe_reinit_fdir_tables_n10(struct rnpgbe_hw *hw); @@ -1102,9 +1125,6 @@ extern int rnpgbe_setup_tc(struct net_device *dev, u8 tc); void rnpgbe_check_options(struct rnpgbe_adapter *adapter); extern int rnpgbe_open(struct net_device *netdev); extern int rnpgbe_close(struct net_device *netdev); -void rnpgbe_tx_ctxtdesc(struct rnpgbe_ring *tx_ring, u32 mss_len_vf_num, - u32 inner_vlan_tunnel_len, int ignore_vlan, - bool crc_pad); void rnpgbe_maybe_tx_ctxtdesc(struct rnpgbe_ring *tx_ring, struct rnpgbe_tx_buffer *first, u32 type_tucmd); @@ -1160,7 +1180,7 @@ static inline int ignore_veb_vlan(struct rnpgbe_adapter *adapter, union rnpgbe_rx_desc *rx_desc) { if (unlikely((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && - (cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_IGNORE_VLAN))) { + (rx_desc->wb.rev1 & cpu_to_le16(VEB_VF_IGNORE_VLAN)))) { return 1; } return 0; @@ -1198,12 +1218,19 @@ static inline bool rnpgbe_removed(void __iomem *addr) return unlikely(!addr); } +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return (struct device *)pdev; +} + #define RNP_REMOVED(a) rnpgbe_removed(a) int rnpgbe_fw_msg_handler(struct rnpgbe_adapter *adapter); -int rnp500_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin, +int rnpgbe_fw_update(struct rnpgbe_hw *hw, int partition, const 
u8 *fw_bin, int bytes); int rnpgbe_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin, int bytes); +void rnpgbe_service_event_schedule(struct rnpgbe_adapter *adapter); int rsp_hal_sfc_flash_erase(struct rnpgbe_hw *hw, u32 size); +int rsp_hal_sfc_write_protect(struct rnpgbe_hw *hw, u32 value); #endif /* _RNPGBE_H_ */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c index 98388c039db8..d2c8d7f8fdc2 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c @@ -28,35 +28,34 @@ #ifndef RNP_N500_MSIX_VECTORS #define RNP_N500_MSIX_VECTORS 26 #endif -#define RNP500_MAX_LAYER2_FILTERS 16 -#define RNP500_MAX_TUPLE5_FILTERS 128 +#define RNPGBE_MAX_LAYER2_FILTERS 16 +#define RNPGBE_MAX_TUPLE5_FILTERS 128 enum n500_priv_bits { n500_mac_loopback = 0, n500_padding_enable = 8, }; -static const char rnp500_priv_flags_strings[][ETH_GSTRING_LEN] = { -#define RNP500_MAC_LOOPBACK BIT(0) -#define RNP500_TX_SOLF_PADDING BIT(1) -#define RNP500_PADDING_DEBUG BIT(2) -#define RNP500_SIMULATE_DOWN BIT(3) -#define RNP500_ULTRA_SHORT BIT(4) -#define RNP500_DOUBLE_VLAN BIT(5) -#define RNP500_PAUSE_OWN BIT(6) -#define RNP500_STAGS_ENABLE BIT(7) -#define RNP500_JUMBO_ENABLE BIT(8) -#define RNP500_TX_PADDING BIT(9) -#define RNP500_REC_HDR_LEN_ERR BIT(10) -#define RNP500_DOUBLE_VLAN_RECEIVE BIT(11) -#define RNP500_RX_SKIP_EN BIT(12) -#define RNP500_TCP_SYNC_PRIO BIT(13) -#define RNP500_REMAP_PRIO BIT(14) -#define RNP500_8023_PRIO BIT(15) -#define RNP500_SRIOV_VLAN_MODE BIT(16) -#define RNP500_LLDP_EN BIT(17) -#define RNP500_FORCE_CLOSE BIT(18) - "mac_loopback", +static const char rnpgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define RNPGBE_FORCE_CLOSE BIT(0) +#define RNPGBE_TX_SOLF_PADDING BIT(1) +#define RNPGBE_PADDING_DEBUG BIT(2) +#define RNPGBE_SIMULATE_DOWN BIT(3) +#define RNPGBE_ULTRA_SHORT BIT(4) +#define RNPGBE_DOUBLE_VLAN BIT(5) +#define RNPGBE_PAUSE_OWN 
BIT(6) +#define RNPGBE_STAGS_ENABLE BIT(7) +#define RNPGBE_JUMBO_ENABLE BIT(8) +#define RNPGBE_TX_PADDING BIT(9) +#define RNPGBE_REC_HDR_LEN_ERR BIT(10) +#define RNPGBE_DOUBLE_VLAN_RECEIVE BIT(11) +#define RNPGBE_RX_SKIP_EN BIT(12) +#define RNPGBE_TCP_SYNC_PRIO BIT(13) +#define RNPGBE_REMAP_PRIO BIT(14) +#define RNPGBE_8023_PRIO BIT(15) +#define RNPGBE_SRIOV_VLAN_MODE BIT(16) +#define RNPGBE_LLDP_EN BIT(17) + "link_down_on_close", "soft_tx_padding_off", "padding_debug", "simulate_link_down", @@ -74,22 +73,20 @@ static const char rnp500_priv_flags_strings[][ETH_GSTRING_LEN] = { "8023_prio", "sriov_vlan_mode", "lldp_en", - "link_down_on_close", }; -#define RNP500_PRIV_FLAGS_STR_LEN ARRAY_SIZE(rnp500_priv_flags_strings) +#define RNPGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(rnpgbe_priv_flags_strings) /* setup queue speed limit to max_rate */ -static void rnpgbe_dma_set_tx_maxrate_n500(struct rnpgbe_dma_info *dma, - u16 queue, u32 max_rate) +static void rnpgbe_dma_set_tx_maxrate(struct rnpgbe_dma_info *dma, + u16 queue, u32 max_rate) { } /* setup mac with vf_num to veb table */ -static void rnpgbe_dma_set_veb_mac_n500(struct rnpgbe_dma_info *dma, u8 *mac, - u32 vfnum, u32 ring) +static void rnpgbe_dma_set_veb_mac(struct rnpgbe_dma_info *dma, u8 *mac, + u32 vfnum, u32 ring) { - /* n500 only has 1 port veb table */ u32 maclow, machi, ring_vfnum; int port; @@ -97,57 +94,57 @@ static void rnpgbe_dma_set_veb_mac_n500(struct rnpgbe_dma_info *dma, u8 *mac, machi = (mac[0] << 8) | mac[1]; ring_vfnum = ring | ((0x80 | vfnum) << 8); for (port = 0; port < 1; port++) { - dma_wr32(dma, RNP500_DMA_PORT_VBE_MAC_LO_TBL(port, vfnum), + dma_wr32(dma, RNPGBE_DMA_PORT_VBE_MAC_LO_TBL(port, vfnum), maclow); - dma_wr32(dma, RNP500_DMA_PORT_VBE_MAC_HI_TBL(port, vfnum), + dma_wr32(dma, RNPGBE_DMA_PORT_VBE_MAC_HI_TBL(port, vfnum), machi); - dma_wr32(dma, RNP500_DMA_PORT_VEB_VF_RING_TBL(port, vfnum), + dma_wr32(dma, RNPGBE_DMA_PORT_VEB_VF_RING_TBL(port, vfnum), ring_vfnum); } } /* setup vlan with vf_num 
to veb table */ -static void rnpgbe_dma_set_veb_vlan_n500(struct rnpgbe_dma_info *dma, u16 vlan, - u32 vfnum) +static void rnpgbe_dma_set_veb_vlan(struct rnpgbe_dma_info *dma, u16 vlan, + u32 vfnum) { int port; /* each vf can support only one vlan */ for (port = 0; port < 1; port++) - dma_wr32(dma, RNP500_DMA_PORT_VEB_VID_TBL(port, vfnum), vlan); + dma_wr32(dma, RNPGBE_DMA_PORT_VEB_VID_TBL(port, vfnum), vlan); } -static void rnpgbe_dma_set_veb_vlan_mask_n500(struct rnpgbe_dma_info *dma, - u16 vlan, u16 mask, int entry) +static void rnpgbe_dma_set_veb_vlan_mask(struct rnpgbe_dma_info *dma, + u16 vlan, u16 mask, int entry) { /* bit 19:12 is mask bit 11:0 is vid */ - dma_wr32(dma, RNP500_DMA_PORT_VEB_VID_TBL(0, entry), + dma_wr32(dma, RNPGBE_DMA_PORT_VEB_VID_TBL(0, entry), (mask << 12) | vlan); } -static void rnpgbe_dma_clr_veb_all_n500(struct rnpgbe_dma_info *dma) +static void rnpgbe_dma_clr_veb_all(struct rnpgbe_dma_info *dma) { int port = 0, i; - for (i = 0; i < RNP500_VEB_TBL_CNTS; i++) { - dma_wr32(dma, RNP500_DMA_PORT_VBE_MAC_LO_TBL(port, i), 0); - dma_wr32(dma, RNP500_DMA_PORT_VBE_MAC_HI_TBL(port, i), 0); - dma_wr32(dma, RNP500_DMA_PORT_VEB_VID_TBL(port, i), 0); - dma_wr32(dma, RNP500_DMA_PORT_VEB_VF_RING_TBL(port, i), 0); + for (i = 0; i < RNPGBE_VEB_TBL_CNTS; i++) { + dma_wr32(dma, RNPGBE_DMA_PORT_VBE_MAC_LO_TBL(port, i), 0); + dma_wr32(dma, RNPGBE_DMA_PORT_VBE_MAC_HI_TBL(port, i), 0); + dma_wr32(dma, RNPGBE_DMA_PORT_VEB_VID_TBL(port, i), 0); + dma_wr32(dma, RNPGBE_DMA_PORT_VEB_VF_RING_TBL(port, i), 0); } } -static struct rnpgbe_dma_operations dma_ops_n500 = { - .set_tx_maxrate = &rnpgbe_dma_set_tx_maxrate_n500, - .set_veb_mac = &rnpgbe_dma_set_veb_mac_n500, - .set_veb_vlan = &rnpgbe_dma_set_veb_vlan_n500, - .set_veb_vlan_mask = &rnpgbe_dma_set_veb_vlan_mask_n500, - .clr_veb_all = &rnpgbe_dma_clr_veb_all_n500, +static struct rnpgbe_dma_operations dma_ops_rnpgbe = { + .set_tx_maxrate = &rnpgbe_dma_set_tx_maxrate, + .set_veb_mac = &rnpgbe_dma_set_veb_mac, + 
.set_veb_vlan = &rnpgbe_dma_set_veb_vlan, + .set_veb_vlan_mask = &rnpgbe_dma_set_veb_vlan_mask, + .clr_veb_all = &rnpgbe_dma_clr_veb_all, }; /** - * rnpgbe_eth_set_rar_n500 - Set Rx address register + * rnpgbe_eth_set_rar - Set Rx address register * @eth: pointer to eth structure * @index: Receive address register to write * @addr: Address to put into receive address register @@ -155,9 +152,9 @@ static struct rnpgbe_dma_operations dma_ops_n500 = { * * Puts an ethernet address into a receive address register. **/ -static s32 rnpgbe_eth_set_rar_n500(struct rnpgbe_eth_info *eth, u32 index, - u8 *addr, - bool enable_addr) +static s32 rnpgbe_eth_set_rar(struct rnpgbe_eth_info *eth, u32 index, + u8 *addr, + bool enable_addr) { u32 mcstctrl; u32 rar_low, rar_high = 0; @@ -180,36 +177,36 @@ static s32 rnpgbe_eth_set_rar_n500(struct rnpgbe_eth_info *eth, u32 index, * so save everything except the lower 16 bits that hold part * of the address and the address valid bit. */ - rar_high = eth_rd32(eth, RNP500_ETH_RAR_RH(index)); - rar_high &= ~(0x0000FFFF | RNP500_RAH_AV); + rar_high = eth_rd32(eth, RNPGBE_ETH_RAR_RH(index)); + rar_high &= ~(0x0000FFFF | RNPGBE_RAH_AV); rar_high |= ((u32)addr[1] | ((u32)addr[0] << 8)); if (enable_addr) - rar_high |= RNP500_RAH_AV; + rar_high |= RNPGBE_RAH_AV; - eth_wr32(eth, RNP500_ETH_RAR_RL(index), rar_low); - eth_wr32(eth, RNP500_ETH_RAR_RH(index), rar_high); + eth_wr32(eth, RNPGBE_ETH_RAR_RL(index), rar_low); + eth_wr32(eth, RNPGBE_ETH_RAR_RH(index), rar_high); /* open unicast filter */ /* we now not use unicast */ /* but we must open this since dest-mac filter | unicast table */ /* all packets up if close unicast table */ - mcstctrl = eth_rd32(eth, RNP500_ETH_DMAC_MCSTCTRL); - mcstctrl |= RNP500_MCSTCTRL_UNICASE_TBL_EN; - eth_wr32(eth, RNP500_ETH_DMAC_MCSTCTRL, mcstctrl); + mcstctrl = eth_rd32(eth, RNPGBE_ETH_DMAC_MCSTCTRL); + mcstctrl |= RNPGBE_MCSTCTRL_UNICASE_TBL_EN; + eth_wr32(eth, RNPGBE_ETH_DMAC_MCSTCTRL, mcstctrl); return 0; } /** - 
* rnpgbe_eth_clear_rar_n500 - Remove Rx address register + * rnpgbe_eth_clear_rar - Remove Rx address register * @eth: pointer to eth structure * @index: Receive address register to write * * Clears an ethernet address from a receive address register. **/ -static s32 rnpgbe_eth_clear_rar_n500(struct rnpgbe_eth_info *eth, - u32 index) +static s32 rnpgbe_eth_clear_rar(struct rnpgbe_eth_info *eth, + u32 index) { u32 rar_high; u32 rar_entries = eth->num_rar_entries; @@ -224,11 +221,11 @@ static s32 rnpgbe_eth_clear_rar_n500(struct rnpgbe_eth_info *eth, * so save everything except the lower 16 bits that hold part * of the address and the address valid bit. */ - rar_high = eth_rd32(eth, RNP500_ETH_RAR_RH(index)); - rar_high &= ~(0x0000FFFF | RNP500_RAH_AV); + rar_high = eth_rd32(eth, RNPGBE_ETH_RAR_RH(index)); + rar_high &= ~(0x0000FFFF | RNPGBE_RAH_AV); - eth_wr32(eth, RNP500_ETH_RAR_RL(index), 0); - eth_wr32(eth, RNP500_ETH_RAR_RH(index), rar_high); + eth_wr32(eth, RNPGBE_ETH_RAR_RL(index), 0); + eth_wr32(eth, RNPGBE_ETH_RAR_RH(index), rar_high); /* clear VMDq pool/queue selection for this RAR */ eth->ops.clear_vmdq(eth, index, RNP_CLEAR_VMDQ_ALL); @@ -237,14 +234,14 @@ static s32 rnpgbe_eth_clear_rar_n500(struct rnpgbe_eth_info *eth, } /** - * rnpgbe_eth_set_vmdq_n500 - Associate a VMDq pool index with a rx address + * rnpgbe_eth_set_vmdq - Associate a VMDq pool index with a rx address * @eth: pointer to eth struct * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq pool index * only mac->vf **/ -static s32 rnpgbe_eth_set_vmdq_n500(struct rnpgbe_eth_info *eth, - u32 rar, u32 vmdq) +static s32 rnpgbe_eth_set_vmdq(struct rnpgbe_eth_info *eth, + u32 rar, u32 vmdq) { u32 rar_entries = eth->num_rar_entries; @@ -254,19 +251,19 @@ static s32 rnpgbe_eth_set_vmdq_n500(struct rnpgbe_eth_info *eth, return RNP_ERR_INVALID_ARGUMENT; } - eth_wr32(eth, RNP500_VM_DMAC_MPSAR_RING(rar), vmdq); + eth_wr32(eth, RNPGBE_VM_DMAC_MPSAR_RING(rar), vmdq); return 
0; } /** - * rnpgbe_eth_clear_vmdq_n500 - Disassociate a VMDq pool index from a rx address + * rnpgbe_eth_clear_vmdq - Disassociate a VMDq pool index from a rx address * @eth: pointer to eth struct * @rar: receive address register index to disassociate * @vmdq: VMDq pool index to remove from the rar **/ -static s32 rnpgbe_eth_clear_vmdq_n500(struct rnpgbe_eth_info *eth, - u32 rar, u32 vmdq) +static s32 rnpgbe_eth_clear_vmdq(struct rnpgbe_eth_info *eth, + u32 rar, u32 vmdq) { u32 rar_entries = eth->num_rar_entries; @@ -276,12 +273,12 @@ static s32 rnpgbe_eth_clear_vmdq_n500(struct rnpgbe_eth_info *eth, return RNP_ERR_INVALID_ARGUMENT; } - eth_wr32(eth, RNP500_VM_DMAC_MPSAR_RING(rar), 0); + eth_wr32(eth, RNPGBE_VM_DMAC_MPSAR_RING(rar), 0); return 0; } -static s32 rnp500_mta_vector(struct rnpgbe_eth_info *eth, u8 *mc_addr) +static s32 rnpgbe_mta_vector(struct rnpgbe_eth_info *eth, u8 *mc_addr) { u32 vector = 0; @@ -323,7 +320,7 @@ static s32 rnp500_mta_vector(struct rnpgbe_eth_info *eth, u8 *mc_addr) return vector; } -static void rnp500_set_mta(struct rnpgbe_hw *hw, u8 *mc_addr) +static void rnpgbe_set_mta(struct rnpgbe_hw *hw, u8 *mc_addr) { u32 vector; u32 vector_bit; @@ -332,7 +329,7 @@ static void rnp500_set_mta(struct rnpgbe_hw *hw, u8 *mc_addr) hw->addr_ctrl.mta_in_use++; - vector = rnp500_mta_vector(eth, mc_addr); + vector = rnpgbe_mta_vector(eth, mc_addr); /* The MTA is a register array of 128 32-bit registers. It is treated * like an array of 4096 bits. 
We want to set bit @@ -349,7 +346,7 @@ static void rnp500_set_mta(struct rnpgbe_hw *hw, u8 *mc_addr) eth->mta_shadow[vector_reg] |= (1 << vector_bit); } -static void rnp500_set_vf_mta(struct rnpgbe_hw *hw, u16 vector) +static void rnpgbe_set_vf_mta(struct rnpgbe_hw *hw, u16 vector) { /* vf/pf use the same multicast table */ u32 vector_bit; @@ -385,7 +382,7 @@ static u8 *rnpgbe_addr_list_itr(struct rnpgbe_hw __maybe_unused *hw, } /** - * rnpgbe_eth_update_mc_addr_list_n500 - Updates MAC list of multicast addresses + * rnpgbe_eth_update_mc_addr_list - Updates MAC list of multicast addresses * @eth: pointer to hardware structure * @netdev: pointer to net device structure * @sriov_on: sriov status @@ -395,9 +392,9 @@ static u8 *rnpgbe_addr_list_itr(struct rnpgbe_hw __maybe_unused *hw, * registers for the first multicast addresses, and hashes the rest into the * multicast table. **/ -static s32 rnpgbe_eth_update_mc_addr_list_n500(struct rnpgbe_eth_info *eth, - struct net_device *netdev, - bool sriov_on) +static s32 rnpgbe_eth_update_mc_addr_list(struct rnpgbe_eth_info *eth, + struct net_device *netdev, + bool sriov_on) { struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back; struct netdev_hw_addr *ha; @@ -429,7 +426,7 @@ static s32 rnpgbe_eth_update_mc_addr_list_n500(struct rnpgbe_eth_info *eth, addr_list = ha->addr; for (i = 0; i < addr_count; i++) { eth_dbg(eth, " Adding the multicast addresses:\n"); - rnp500_set_mta(hw, rnpgbe_addr_list_itr(hw, &addr_list)); + rnpgbe_set_mta(hw, rnpgbe_addr_list_itr(hw, &addr_list)); } if (!sriov_on) @@ -445,7 +442,7 @@ static s32 rnpgbe_eth_update_mc_addr_list_n500(struct rnpgbe_eth_info *eth, vfinfo = &adapter->vfinfo[i]; for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) - rnp500_set_vf_mta(hw, vfinfo->vf_mc_hashes[j]); + rnpgbe_set_vf_mta(hw, vfinfo->vf_mc_hashes[j]); } skip_sriov: @@ -453,21 +450,21 @@ static s32 rnpgbe_eth_update_mc_addr_list_n500(struct rnpgbe_eth_info *eth, for (i = NCSI_RAR_NUM; i < NCSI_MC_NUM; i++) { ret = 
hw->ops.get_ncsi_mac(hw, ncsi_mc_addr, i); if (!ret) - rnp500_set_mta(hw, ncsi_mc_addr); + rnpgbe_set_mta(hw, ncsi_mc_addr); } /* Enable mta */ for (i = 0; i < hw->eth.mcft_size; i++) { if (hw->addr_ctrl.mta_in_use) { - eth_wr32(eth, RNP500_ETH_MUTICAST_HASH_TABLE(i), + eth_wr32(eth, RNPGBE_ETH_MUTICAST_HASH_TABLE(i), eth->mta_shadow[i]); } } if (hw->addr_ctrl.mta_in_use > 0) { - v = eth_rd32(eth, RNP500_ETH_DMAC_MCSTCTRL); - eth_wr32(eth, RNP500_ETH_DMAC_MCSTCTRL, - v | RNP500_MCSTCTRL_MULTICASE_TBL_EN | + v = eth_rd32(eth, RNPGBE_ETH_DMAC_MCSTCTRL); + eth_wr32(eth, RNPGBE_ETH_DMAC_MCSTCTRL, + v | RNPGBE_MCSTCTRL_MULTICASE_TBL_EN | eth->mc_filter_type); } @@ -478,27 +475,27 @@ static s32 rnpgbe_eth_update_mc_addr_list_n500(struct rnpgbe_eth_info *eth, } /* clean all mc addr */ -static void rnpgbe_eth_clr_mc_addr_n500(struct rnpgbe_eth_info *eth) +static void rnpgbe_eth_clr_mc_addr(struct rnpgbe_eth_info *eth) { int i; for (i = 0; i < eth->mcft_size; i++) - eth_wr32(eth, RNP500_ETH_MUTICAST_HASH_TABLE(i), 0); + eth_wr32(eth, RNPGBE_ETH_MUTICAST_HASH_TABLE(i), 0); } /** - * rnpgbe_eth_set_rss_hfunc_n500 - Remove Rx address register + * rnpgbe_eth_set_rss_hfunc - Remove Rx address register * @eth: pointer to eth structure * @hfunc: hash function type * * update rss key to eth regs **/ -static int rnpgbe_eth_set_rss_hfunc_n500(struct rnpgbe_eth_info *eth, - int hfunc) +static int rnpgbe_eth_set_rss_hfunc(struct rnpgbe_eth_info *eth, + int hfunc) { u32 data; - data = eth_rd32(eth, RNP500_ETH_RSS_CONTROL); + data = eth_rd32(eth, RNPGBE_ETH_RSS_CONTROL); data &= ~(BIT(14) | BIT(15)); if (hfunc == rss_func_top) @@ -511,54 +508,52 @@ static int rnpgbe_eth_set_rss_hfunc_n500(struct rnpgbe_eth_info *eth, return -EINVAL; /* update to hardware */ - eth_wr32(eth, RNP500_ETH_RSS_CONTROL, data); + eth_wr32(eth, RNPGBE_ETH_RSS_CONTROL, data); return 0; } /** - * rnpgbe_eth_update_rss_key_n500 - Remove Rx address register + * rnpgbe_eth_update_rss_key - Remove Rx address register * 
@eth: pointer to eth structure * @sriov_flag: sriov status * * update rss key to eth regs **/ -static void rnpgbe_eth_update_rss_key_n500(struct rnpgbe_eth_info *eth, - bool sriov_flag) +static void rnpgbe_eth_update_rss_key(struct rnpgbe_eth_info *eth, + bool sriov_flag) { struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back; int i; - u8 *key_temp; + u8 key_temp[RNP_RSS_KEY_SIZE]; int key_len = RNP_RSS_KEY_SIZE; u8 *key = hw->rss_key; u32 data; - u32 iov_en = (sriov_flag) ? RNP500_IOV_ENABLED : 0; + u32 iov_en = (sriov_flag) ? RNPGBE_IOV_ENABLED : 0; u32 *value; - data = eth_rd32(eth, RNP500_ETH_RSS_CONTROL); + data = eth_rd32(eth, RNPGBE_ETH_RSS_CONTROL); - key_temp = kmalloc(key_len, GFP_KERNEL); /* reoder the key */ for (i = 0; i < key_len; i++) *(key_temp + key_len - i - 1) = *(key + i); value = (u32 *)key_temp; for (i = 0; i < key_len; i = i + 4) - eth_wr32(eth, RNP500_ETH_RSS_KEY + i, *(value + i / 4)); - kfree(key_temp); + eth_wr32(eth, RNPGBE_ETH_RSS_KEY + i, *(value + i / 4)); - data |= (RNP500_ETH_ENABLE_RSS_ONLY | iov_en); - eth_wr32(eth, RNP500_ETH_RSS_CONTROL, data); + data |= (RNPGBE_ETH_ENABLE_RSS_ONLY | iov_en); + eth_wr32(eth, RNPGBE_ETH_RSS_CONTROL, data); } /** - * rnpgbe_eth_update_rss_table_n500 - Remove Rx address register + * rnpgbe_eth_update_rss_table - Remove Rx address register * @eth: pointer to eth structure * * update rss table to eth regs **/ -static void rnpgbe_eth_update_rss_table_n500(struct rnpgbe_eth_info *eth) +static void rnpgbe_eth_update_rss_table(struct rnpgbe_eth_info *eth) { struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back; u32 reta_entries = hw->rss_indir_tbl_num; @@ -566,28 +561,28 @@ static void rnpgbe_eth_update_rss_table_n500(struct rnpgbe_eth_info *eth) int i; for (i = 0; i < tc_entries; i++) - eth_wr32(eth, RNP500_ETH_TC_IPH_OFFSET_TABLE(i), + eth_wr32(eth, RNPGBE_ETH_TC_IPH_OFFSET_TABLE(i), hw->rss_tc_tbl[i]); for (i = 0; i < reta_entries; i++) - eth_wr32(eth, RNP500_ETH_RSS_INDIR_TBL(i), + eth_wr32(eth, 
RNPGBE_ETH_RSS_INDIR_TBL(i), hw->rss_indir_tbl[i]); /* if we update rss table ,we should update deault ring same with rss[0] */ - eth_wr32(eth, RNP500_ETH_DEFAULT_RX_RING, hw->rss_indir_tbl[0]); + eth_wr32(eth, RNPGBE_ETH_DEFAULT_RX_RING, hw->rss_indir_tbl[0]); } /** - * rnpgbe_eth_set_vfta_n500 - Set VLAN filter table + * rnpgbe_eth_set_vfta - Set VLAN filter table * @eth: pointer to eth structure * @vlan: VLAN id to write to VLAN filter * @vlan_on: boolean flag to turn on/off VLAN in VFVF * * Turn on/off specified VLAN in the VLAN filter table. **/ -static s32 rnpgbe_eth_set_vfta_n500(struct rnpgbe_eth_info *eth, - u32 vlan, - bool vlan_on) +static s32 rnpgbe_eth_set_vfta(struct rnpgbe_eth_info *eth, + u32 vlan, + bool vlan_on) { s32 regindex; u32 bitindex; @@ -607,7 +602,7 @@ static s32 rnpgbe_eth_set_vfta_n500(struct rnpgbe_eth_info *eth, regindex = (vlan >> 5) & 0x7F; bitindex = vlan & 0x1F; targetbit = (1 << bitindex); - vfta = eth_rd32(eth, RNP500_VFTA(regindex)); + vfta = eth_rd32(eth, RNPGBE_VFTA(regindex)); if (vlan_on) { if (!(vfta & targetbit)) { @@ -622,30 +617,30 @@ static s32 rnpgbe_eth_set_vfta_n500(struct rnpgbe_eth_info *eth, } if (vfta_changed) - eth_wr32(eth, RNP500_VFTA(regindex), vfta); + eth_wr32(eth, RNPGBE_VFTA(regindex), vfta); return 0; } -static void rnpgbe_eth_clr_vfta_n500(struct rnpgbe_eth_info *eth) +static void rnpgbe_eth_clr_vfta(struct rnpgbe_eth_info *eth) { u32 offset; for (offset = 0; offset < eth->vft_size; offset++) - eth_wr32(eth, RNP500_VFTA(offset), 0); + eth_wr32(eth, RNPGBE_VFTA(offset), 0); } -static void rnpgbe_eth_set_doulbe_vlan_n500(struct rnpgbe_eth_info *eth, - bool on) +static void rnpgbe_eth_set_doulbe_vlan(struct rnpgbe_eth_info *eth, + bool on) { if (on) - eth_wr32(eth, RNP500_ETH_VLAN_RM_TYPE, 1); + eth_wr32(eth, RNPGBE_ETH_VLAN_RM_TYPE, 1); else - eth_wr32(eth, RNP500_ETH_VLAN_RM_TYPE, 0); + eth_wr32(eth, RNPGBE_ETH_VLAN_RM_TYPE, 0); } -static void rnpgbe_eth_set_outer_vlan_type_n500(struct rnpgbe_eth_info 
*eth, - int type) +static void rnpgbe_eth_set_outer_vlan_type(struct rnpgbe_eth_info *eth, + int type) { u32 data = 0x88a8; @@ -660,124 +655,124 @@ static void rnpgbe_eth_set_outer_vlan_type_n500(struct rnpgbe_eth_info *eth, data = 0x9200; break; } - eth_wr32(eth, RNP500_ETH_WRAP_FIELD_TYPE, data); - eth_wr32(eth, RNP500_ETH_TX_VLAN_TYPE, data); + eth_wr32(eth, RNPGBE_ETH_WRAP_FIELD_TYPE, data); + eth_wr32(eth, RNPGBE_ETH_TX_VLAN_TYPE, data); } /** - * rnpgbe_eth_set_vlan_filter_n500 - Set VLAN filter table + * rnpgbe_eth_set_vlan_filter - Set VLAN filter table * @eth: pointer to eth structure * @status: on |off * Turn on/off VLAN filter table. **/ -static void rnpgbe_eth_set_vlan_filter_n500(struct rnpgbe_eth_info *eth, - bool status) +static void rnpgbe_eth_set_vlan_filter(struct rnpgbe_eth_info *eth, + bool status) { #define ETH_VLAN_FILTER_BIT (30) - u32 value = eth_rd32(eth, RNP500_ETH_VLAN_FILTER_ENABLE); + u32 value = eth_rd32(eth, RNPGBE_ETH_VLAN_FILTER_ENABLE); /* clear bit first */ value &= (~(0x01 << ETH_VLAN_FILTER_BIT)); if (status) value |= (0x01 << ETH_VLAN_FILTER_BIT); - eth_wr32(eth, RNP500_ETH_VLAN_FILTER_ENABLE, value); + eth_wr32(eth, RNPGBE_ETH_VLAN_FILTER_ENABLE, value); } -static u16 rnpgbe_layer2_pritologic_n500(u16 hw_id) +static u16 rnpgbe_layer2_pritologic(u16 hw_id) { return hw_id; } -static void rnpgbe_eth_set_layer2_n500(struct rnpgbe_eth_info *eth, - union rnpgbe_atr_input *input, - u16 pri_id, - u8 queue, bool prio_flag) +static void rnpgbe_eth_set_layer2(struct rnpgbe_eth_info *eth, + union rnpgbe_atr_input *input, + u16 pri_id, + u8 queue, bool prio_flag) { u16 hw_id; - hw_id = rnpgbe_layer2_pritologic_n500(pri_id); + hw_id = rnpgbe_layer2_pritologic(pri_id); /* enable layer2 */ - eth_wr32(eth, RNP500_ETH_LAYER2_ETQF(hw_id), + eth_wr32(eth, RNPGBE_ETH_LAYER2_ETQF(hw_id), (0x1 << 31) | (ntohs(input->layer2_formate.proto))); /* setup action */ if (queue == RNP_FDIR_DROP_QUEUE) { - eth_wr32(eth, RNP500_ETH_LAYER2_ETQS(hw_id), (0x1 << 
31)); + eth_wr32(eth, RNPGBE_ETH_LAYER2_ETQS(hw_id), (0x1 << 31)); } else { /* setup ring_number */ if (prio_flag) { - eth_wr32(eth, RNP500_ETH_LAYER2_ETQS(hw_id), + eth_wr32(eth, RNPGBE_ETH_LAYER2_ETQS(hw_id), (0x1 << 30) | (queue << 20) | (0x1 << 28)); } else { - eth_wr32(eth, RNP500_ETH_LAYER2_ETQS(hw_id), + eth_wr32(eth, RNPGBE_ETH_LAYER2_ETQS(hw_id), (0x1 << 30) | (queue << 20)); } } } -static void rnpgbe_eth_clr_layer2_n500(struct rnpgbe_eth_info *eth, u16 pri_id) +static void rnpgbe_eth_clr_layer2(struct rnpgbe_eth_info *eth, u16 pri_id) { u16 hw_id; - hw_id = rnpgbe_layer2_pritologic_n500(pri_id); - eth_wr32(eth, RNP500_ETH_LAYER2_ETQF(hw_id), 0); + hw_id = rnpgbe_layer2_pritologic(pri_id); + eth_wr32(eth, RNPGBE_ETH_LAYER2_ETQF(hw_id), 0); } -static void rnpgbe_eth_clr_all_layer2_n500(struct rnpgbe_eth_info *eth) +static void rnpgbe_eth_clr_all_layer2(struct rnpgbe_eth_info *eth) { int i; -#define RNP500_MAX_LAYER2_FILTERS 16 - for (i = 0; i < RNP500_MAX_LAYER2_FILTERS; i++) - eth_wr32(eth, RNP500_ETH_LAYER2_ETQF(i), 0); +#define RNPGBE_MAX_LAYER2_FILTERS 16 + for (i = 0; i < RNPGBE_MAX_LAYER2_FILTERS; i++) + eth_wr32(eth, RNPGBE_ETH_LAYER2_ETQF(i), 0); } -static u16 rnpgbe_tuple5_pritologic_n500(u16 hw_id) +static u16 rnpgbe_tuple5_pritologic(u16 hw_id) { return hw_id; } -static void rnpgbe_eth_set_tuple5_n500(struct rnpgbe_eth_info *eth, - union rnpgbe_atr_input *input, - u16 pri_id, - u8 queue, bool prio_flag) +static void rnpgbe_eth_set_tuple5(struct rnpgbe_eth_info *eth, + union rnpgbe_atr_input *input, + u16 pri_id, + u8 queue, bool prio_flag) { -#define RNP500_SRC_IP_MASK BIT(0) -#define RNP500_DST_IP_MASK BIT(1) -#define RNP500_SRC_PORT_MASK BIT(2) -#define RNP500_DST_PORT_MASK BIT(3) -#define RNP500_L4_PROTO_MASK BIT(4) +#define RNPGBE_SRC_IP_MASK BIT(0) +#define RNPGBE_DST_IP_MASK BIT(1) +#define RNPGBE_SRC_PORT_MASK BIT(2) +#define RNPGBE_DST_PORT_MASK BIT(3) +#define RNPGBE_L4_PROTO_MASK BIT(4) u32 port = 0; u8 mask_temp = 0; u8 l4_proto_type = 
0; u16 hw_id; - hw_id = rnpgbe_tuple5_pritologic_n500(pri_id); + hw_id = rnpgbe_tuple5_pritologic(pri_id); if (input->formatted.src_ip[0] != 0) { - eth_wr32(eth, RNP500_ETH_TUPLE5_SAQF(hw_id), - htonl(input->formatted.src_ip[0])); + eth_wr32(eth, RNPGBE_ETH_TUPLE5_SAQF(hw_id), + ntohl(input->formatted.src_ip[0])); } else { - mask_temp |= RNP500_SRC_IP_MASK; + mask_temp |= RNPGBE_SRC_IP_MASK; } if (input->formatted.dst_ip[0] != 0) { - eth_wr32(eth, RNP500_ETH_TUPLE5_DAQF(hw_id), - htonl(input->formatted.dst_ip[0])); + eth_wr32(eth, RNPGBE_ETH_TUPLE5_DAQF(hw_id), + ntohl(input->formatted.dst_ip[0])); } else { - mask_temp |= RNP500_DST_IP_MASK; + mask_temp |= RNPGBE_DST_IP_MASK; } if (input->formatted.src_port != 0) - port |= (htons(input->formatted.src_port)); + port |= (ntohs(input->formatted.src_port)); else - mask_temp |= RNP500_SRC_PORT_MASK; + mask_temp |= RNPGBE_SRC_PORT_MASK; if (input->formatted.dst_port != 0) - port |= (htons(input->formatted.dst_port) << 16); + port |= (ntohs(input->formatted.dst_port) << 16); else - mask_temp |= RNP500_DST_PORT_MASK; + mask_temp |= RNPGBE_DST_PORT_MASK; if (port != 0) - eth_wr32(eth, RNP500_ETH_TUPLE5_SDPQF(hw_id), port); + eth_wr32(eth, RNPGBE_ETH_TUPLE5_SDPQF(hw_id), port); switch (input->formatted.flow_type) { case RNP_ATR_FLOW_TYPE_TCPV4: @@ -797,85 +792,85 @@ static void rnpgbe_eth_set_tuple5_n500(struct rnpgbe_eth_info *eth, } if (l4_proto_type == 0) - mask_temp |= RNP500_L4_PROTO_MASK; + mask_temp |= RNPGBE_L4_PROTO_MASK; /* setup ftqf */ /* always set 0x3 */ - eth_wr32(eth, RNP500_ETH_TUPLE5_FTQF(hw_id), + eth_wr32(eth, RNPGBE_ETH_TUPLE5_FTQF(hw_id), (1 << 31) | (mask_temp << 25) | (l4_proto_type << 16) | 0x3); /* setup action */ if (queue == RNP_FDIR_DROP_QUEUE) { - eth_wr32(eth, RNP500_ETH_TUPLE5_POLICY(hw_id), (0x1 << 31)); + eth_wr32(eth, RNPGBE_ETH_TUPLE5_POLICY(hw_id), (0x1 << 31)); } else { /* setup ring_number */ if (prio_flag) { - eth_wr32(eth, RNP500_ETH_TUPLE5_POLICY(hw_id), + eth_wr32(eth, 
RNPGBE_ETH_TUPLE5_POLICY(hw_id), ((0x1 << 30) | (queue << 20) | (0x1 << 28))); } else { - eth_wr32(eth, RNP500_ETH_TUPLE5_POLICY(hw_id), + eth_wr32(eth, RNPGBE_ETH_TUPLE5_POLICY(hw_id), ((0x1 << 30) | (queue << 20))); } } } -static void rnpgbe_eth_clr_tuple5_n500(struct rnpgbe_eth_info *eth, - u16 pri_id) +static void rnpgbe_eth_clr_tuple5(struct rnpgbe_eth_info *eth, + u16 pri_id) { u16 hw_id; - hw_id = rnpgbe_tuple5_pritologic_n500(pri_id); - eth_wr32(eth, RNP500_ETH_TUPLE5_FTQF(hw_id), 0); + hw_id = rnpgbe_tuple5_pritologic(pri_id); + eth_wr32(eth, RNPGBE_ETH_TUPLE5_FTQF(hw_id), 0); } -static void rnpgbe_eth_clr_all_tuple5_n500(struct rnpgbe_eth_info *eth) +static void rnpgbe_eth_clr_all_tuple5(struct rnpgbe_eth_info *eth) { int i; - for (i = 0; i < RNP500_MAX_TUPLE5_FILTERS; i++) - eth_wr32(eth, RNP500_ETH_TUPLE5_FTQF(i), 0); + for (i = 0; i < RNPGBE_MAX_TUPLE5_FILTERS; i++) + eth_wr32(eth, RNPGBE_ETH_TUPLE5_FTQF(i), 0); } -static void rnpgbe_eth_set_tcp_sync_n500(struct rnpgbe_eth_info *eth, - int queue, - bool flag, bool prio) +static void rnpgbe_eth_set_tcp_sync(struct rnpgbe_eth_info *eth, + int queue, + bool flag, bool prio) { if (flag) { - eth_wr32(eth, RNP500_ETH_SYNQF, (0x1 << 30) | (queue << 20)); + eth_wr32(eth, RNPGBE_ETH_SYNQF, (0x1 << 30) | (queue << 20)); if (prio) - eth_wr32(eth, RNP500_ETH_SYNQF_PRIORITY, BIT(31) | 0x1); + eth_wr32(eth, RNPGBE_ETH_SYNQF_PRIORITY, BIT(31) | 0x1); else - eth_wr32(eth, RNP500_ETH_SYNQF_PRIORITY, BIT(31)); + eth_wr32(eth, RNPGBE_ETH_SYNQF_PRIORITY, BIT(31)); } else { - eth_wr32(eth, RNP500_ETH_SYNQF, 0); - eth_wr32(eth, RNP500_ETH_SYNQF_PRIORITY, 0); + eth_wr32(eth, RNPGBE_ETH_SYNQF, 0); + eth_wr32(eth, RNPGBE_ETH_SYNQF_PRIORITY, 0); } } -static void rnpgbe_eth_set_rx_skip_n500(struct rnpgbe_eth_info *eth, - int count, - bool flag) +static void rnpgbe_eth_set_rx_skip(struct rnpgbe_eth_info *eth, + int count, + bool flag) { if (flag) { - eth_wr32(eth, RNP500_ETH_PRIV_DATA_CONTROL_REG, + eth_wr32(eth, 
RNPGBE_ETH_PRIV_DATA_CONTROL_REG, PRIV_DATA_EN | count); } else { - eth_wr32(eth, RNP500_ETH_PRIV_DATA_CONTROL_REG, 0); + eth_wr32(eth, RNPGBE_ETH_PRIV_DATA_CONTROL_REG, 0); } } -static void rnpgbe_eth_set_min_max_packets_n500(struct rnpgbe_eth_info *eth, - int min, int max) +static void rnpgbe_eth_set_min_max_packets(struct rnpgbe_eth_info *eth, + int min, int max) { - eth_wr32(eth, RNP500_ETH_DEFAULT_RX_MIN_LEN, min); - eth_wr32(eth, RNP500_ETH_DEFAULT_RX_MAX_LEN, max); + eth_wr32(eth, RNPGBE_ETH_DEFAULT_RX_MIN_LEN, min); + eth_wr32(eth, RNPGBE_ETH_DEFAULT_RX_MAX_LEN, max); } -static void rnpgbe_eth_set_vlan_strip_n500(struct rnpgbe_eth_info *eth, - u16 queue, bool enable) +static void rnpgbe_eth_set_vlan_strip(struct rnpgbe_eth_info *eth, + u16 queue, bool enable) { - u32 reg = RNP500_ETH_VLAN_VME_REG(queue / 32); + u32 reg = RNPGBE_ETH_VLAN_VME_REG(queue / 32); u32 offset = queue % 32; u32 data = eth_rd32(eth, reg); @@ -887,56 +882,56 @@ static void rnpgbe_eth_set_vlan_strip_n500(struct rnpgbe_eth_info *eth, eth_wr32(eth, reg, data); } -static void rnpgbe_eth_set_rx_hash_n500(struct rnpgbe_eth_info *eth, - bool status, bool sriov_flag) +static void rnpgbe_eth_set_rx_hash(struct rnpgbe_eth_info *eth, + bool status, bool sriov_flag) { - u32 iov_en = (sriov_flag) ? RNP500_IOV_ENABLED : 0; + u32 iov_en = (sriov_flag) ? 
RNPGBE_IOV_ENABLED : 0; u32 data; - data = eth_rd32(eth, RNP500_ETH_RSS_CONTROL); - data &= ~RNP500_ETH_RSS_MASK; + data = eth_rd32(eth, RNPGBE_ETH_RSS_CONTROL); + data &= ~RNPGBE_ETH_RSS_MASK; if (status) { - data |= RNP500_ETH_ENABLE_RSS_ONLY; - eth_wr32(eth, RNP500_ETH_RSS_CONTROL, data | iov_en); + data |= RNPGBE_ETH_ENABLE_RSS_ONLY; + eth_wr32(eth, RNPGBE_ETH_RSS_CONTROL, data | iov_en); } else { - eth_wr32(eth, RNP500_ETH_RSS_CONTROL, data | iov_en); + eth_wr32(eth, RNPGBE_ETH_RSS_CONTROL, data | iov_en); } } -static void rnpgbe_eth_set_rx_n500(struct rnpgbe_eth_info *eth, bool status) +static void rnpgbe_eth_set_rx(struct rnpgbe_eth_info *eth, bool status) { if (status) { - eth_wr32(eth, RNP500_ETH_EXCEPT_DROP_PROC, 0); - eth_wr32(eth, RNP500_ETH_TX_MUX_DROP, 0); + eth_wr32(eth, RNPGBE_ETH_EXCEPT_DROP_PROC, 0); + eth_wr32(eth, RNPGBE_ETH_TX_MUX_DROP, 0); } else { - eth_wr32(eth, RNP500_ETH_EXCEPT_DROP_PROC, 1); - eth_wr32(eth, RNP500_ETH_TX_MUX_DROP, 1); + eth_wr32(eth, RNPGBE_ETH_EXCEPT_DROP_PROC, 1); + eth_wr32(eth, RNPGBE_ETH_TX_MUX_DROP, 1); } } -static void rnpgbe_eth_fcs_n500(struct rnpgbe_eth_info *eth, bool status) +static void rnpgbe_eth_fcs(struct rnpgbe_eth_info *eth, bool status) { if (status) - eth_wr32(eth, RNP500_ETH_FCS_EN, 1); + eth_wr32(eth, RNPGBE_ETH_FCS_EN, 1); else - eth_wr32(eth, RNP500_ETH_FCS_EN, 0); + eth_wr32(eth, RNPGBE_ETH_FCS_EN, 0); } -static void rnpgbe_eth_set_vf_vlan_mode_n500(struct rnpgbe_eth_info *eth, - u16 vlan, int vf, bool enable) +static void rnpgbe_eth_set_vf_vlan_mode(struct rnpgbe_eth_info *eth, + u16 vlan, int vf, bool enable) { u32 value = vlan; if (enable) value |= BIT(31); - eth_wr32(eth, RNP500_VLVF(vf), value); + eth_wr32(eth, RNPGBE_VLVF(vf), value); /* n500 1 vf only can setup 1 vlan */ - eth_wr32(eth, RNP500_VLVF_TABLE(vf), vf); + eth_wr32(eth, RNPGBE_VLVF_TABLE(vf), vf); } -static s32 rnpgbe_eth_set_fc_mode_n500(struct rnpgbe_eth_info *eth) +static s32 rnpgbe_eth_set_fc_mode(struct rnpgbe_eth_info *eth) 
{ struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back; s32 ret_val = 0; @@ -955,11 +950,11 @@ static s32 rnpgbe_eth_set_fc_mode_n500(struct rnpgbe_eth_info *eth) if ((hw->fc.current_mode & rnpgbe_fc_tx_pause)) { if (hw->fc.high_water[i]) { - eth_wr32(eth, RNP500_ETH_HIGH_WATER(i), + eth_wr32(eth, RNPGBE_ETH_HIGH_WATER(i), hw->fc.high_water[i]); } if (hw->fc.low_water[i]) { - eth_wr32(eth, RNP500_ETH_LOW_WATER(i), + eth_wr32(eth, RNPGBE_ETH_LOW_WATER(i), hw->fc.low_water[i]); } } @@ -967,40 +962,40 @@ static s32 rnpgbe_eth_set_fc_mode_n500(struct rnpgbe_eth_info *eth) return ret_val; } -static struct rnpgbe_eth_operations eth_ops_n500 = { - .set_rar = &rnpgbe_eth_set_rar_n500, - .clear_rar = &rnpgbe_eth_clear_rar_n500, - .set_vmdq = &rnpgbe_eth_set_vmdq_n500, - .clear_vmdq = &rnpgbe_eth_clear_vmdq_n500, - .update_mc_addr_list = &rnpgbe_eth_update_mc_addr_list_n500, - .clr_mc_addr = &rnpgbe_eth_clr_mc_addr_n500, - .set_rss_hfunc = &rnpgbe_eth_set_rss_hfunc_n500, - .set_rss_key = &rnpgbe_eth_update_rss_key_n500, - .set_rss_table = &rnpgbe_eth_update_rss_table_n500, - .set_rx_hash = &rnpgbe_eth_set_rx_hash_n500, - .set_layer2_remapping = &rnpgbe_eth_set_layer2_n500, - .clr_layer2_remapping = &rnpgbe_eth_clr_layer2_n500, - .clr_all_layer2_remapping = &rnpgbe_eth_clr_all_layer2_n500, - .set_tuple5_remapping = &rnpgbe_eth_set_tuple5_n500, - .clr_tuple5_remapping = &rnpgbe_eth_clr_tuple5_n500, - .clr_all_tuple5_remapping = &rnpgbe_eth_clr_all_tuple5_n500, - .set_tcp_sync_remapping = &rnpgbe_eth_set_tcp_sync_n500, - .set_rx_skip = &rnpgbe_eth_set_rx_skip_n500, - .set_min_max_packet = &rnpgbe_eth_set_min_max_packets_n500, - .set_vlan_strip = &rnpgbe_eth_set_vlan_strip_n500, - .set_vfta = &rnpgbe_eth_set_vfta_n500, - .clr_vfta = &rnpgbe_eth_clr_vfta_n500, - .set_vlan_filter = &rnpgbe_eth_set_vlan_filter_n500, - .set_outer_vlan_type = &rnpgbe_eth_set_outer_vlan_type_n500, - .set_double_vlan = &rnpgbe_eth_set_doulbe_vlan_n500, - .set_fc_mode = &rnpgbe_eth_set_fc_mode_n500, - 
.set_rx = &rnpgbe_eth_set_rx_n500, - .set_fcs = &rnpgbe_eth_fcs_n500, - .set_vf_vlan_mode = &rnpgbe_eth_set_vf_vlan_mode_n500, +static struct rnpgbe_eth_operations eth_ops_rnpgbe = { + .set_rar = &rnpgbe_eth_set_rar, + .clear_rar = &rnpgbe_eth_clear_rar, + .set_vmdq = &rnpgbe_eth_set_vmdq, + .clear_vmdq = &rnpgbe_eth_clear_vmdq, + .update_mc_addr_list = &rnpgbe_eth_update_mc_addr_list, + .clr_mc_addr = &rnpgbe_eth_clr_mc_addr, + .set_rss_hfunc = &rnpgbe_eth_set_rss_hfunc, + .set_rss_key = &rnpgbe_eth_update_rss_key, + .set_rss_table = &rnpgbe_eth_update_rss_table, + .set_rx_hash = &rnpgbe_eth_set_rx_hash, + .set_layer2_remapping = &rnpgbe_eth_set_layer2, + .clr_layer2_remapping = &rnpgbe_eth_clr_layer2, + .clr_all_layer2_remapping = &rnpgbe_eth_clr_all_layer2, + .set_tuple5_remapping = &rnpgbe_eth_set_tuple5, + .clr_tuple5_remapping = &rnpgbe_eth_clr_tuple5, + .clr_all_tuple5_remapping = &rnpgbe_eth_clr_all_tuple5, + .set_tcp_sync_remapping = &rnpgbe_eth_set_tcp_sync, + .set_rx_skip = &rnpgbe_eth_set_rx_skip, + .set_min_max_packet = &rnpgbe_eth_set_min_max_packets, + .set_vlan_strip = &rnpgbe_eth_set_vlan_strip, + .set_vfta = &rnpgbe_eth_set_vfta, + .clr_vfta = &rnpgbe_eth_clr_vfta, + .set_vlan_filter = &rnpgbe_eth_set_vlan_filter, + .set_outer_vlan_type = &rnpgbe_eth_set_outer_vlan_type, + .set_double_vlan = &rnpgbe_eth_set_doulbe_vlan, + .set_fc_mode = &rnpgbe_eth_set_fc_mode, + .set_rx = &rnpgbe_eth_set_rx, + .set_fcs = &rnpgbe_eth_fcs, + .set_vf_vlan_mode = &rnpgbe_eth_set_vf_vlan_mode, }; /** - * rnpgbe_init_hw_ops_n500 - Generic hardware initialization + * rnpgbe_init_hw_ops - Generic hardware initialization * @hw: pointer to hardware structure * * Initialize the hardware by resetting the hardware, filling the bus info @@ -1009,7 +1004,7 @@ static struct rnpgbe_eth_operations eth_ops_n500 = { * up link and flow control settings, and leaves transmit and receive units * disabled and uninitialized **/ -static s32 rnpgbe_init_hw_ops_n500(struct rnpgbe_hw *hw) 
+static s32 rnpgbe_init_hw_ops(struct rnpgbe_hw *hw) { s32 status = 0; @@ -1022,8 +1017,8 @@ static s32 rnpgbe_init_hw_ops_n500(struct rnpgbe_hw *hw) return status; } -static s32 rnpgbe_get_permtion_mac_addr_n500(struct rnpgbe_hw *hw, - u8 *mac_addr) +static s32 rnpgbe_get_permtion_mac_addr(struct rnpgbe_hw *hw, + u8 *mac_addr) { if (rnpgbe_fw_get_macaddr(hw, hw->pfvfnum, mac_addr, hw->nr_lane)) eth_random_addr(mac_addr); @@ -1036,12 +1031,11 @@ static s32 rnpgbe_get_permtion_mac_addr_n500(struct rnpgbe_hw *hw, return 0; } -static s32 rnpgbe_reset_hw_ops_n500(struct rnpgbe_hw *hw) +static s32 rnpgbe_reset_hw_ops(struct rnpgbe_hw *hw) { int i; struct rnpgbe_dma_info *dma = &hw->dma; struct rnpgbe_eth_info *eth = &hw->eth; - struct rnpgbe_mac_info *mac = &hw->mac; /* Call adapter stop to disable tx/rx and clear interrupts */ dma_wr32(dma, RNP_DMA_AXI_EN, 0); @@ -1052,74 +1046,44 @@ static s32 rnpgbe_reset_hw_ops_n500(struct rnpgbe_hw *hw) /* Store the permanent mac address */ if (!(hw->mac.mac_flags & RNP_FLAGS_INIT_MAC_ADDRESS)) { - rnpgbe_get_permtion_mac_addr_n500(hw, hw->mac.perm_addr); + rnpgbe_get_permtion_mac_addr(hw, hw->mac.perm_addr); memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN); } hw->ops.init_rx_addrs(hw); - /* n500 should do this */ - eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR, - RNP_N500_PKT_LEN_ERR | RNP_N500_HDR_LEN_ERR); + /* default drop */ + eth_wr32(eth, RNPGBE_ETH_ERR_MASK_VECTOR, 0); - wr32(hw, RNP_DMA_RX_DATA_PROG_FULL_THRESH, 0xa); + hw_wr32(hw, RNP_DMA_RX_DATA_PROG_FULL_THRESH, 0xa); /* reset all ring msix table to 0 */ for (i = 0; i < 12; i++) rnpgbe_wr_reg(hw->ring_msix_base + RING_VECTOR(i), 0); - { - u32 value = 0; - - value |= RNP_MODE_NO_SA_INSER << RNP_SARC_OFFSET; - value &= (~RNP_TWOKPE_MASK); - value &= (~RNP_SFTERR_MASK); - value |= (RNP_CST_MASK); - value |= RNP_TC_MASK; - value &= (~RNP_WD_MASK); - value &= (~RNP_JD_MASK); - value &= (~RNP_BE_MASK); - value |= (RNP_JE_MASK); - value |= (RNP_IFG_96 << RNP_IFG_OFFSET); - value 
&= (~RNP_DCRS_MASK); - value &= (~RNP_PS_MASK); - value &= (~RNP_FES_MASK); - value &= (~RNP_DO_MASK); - value &= (~RNP_LM_MASK); - value |= RNP_DM_MASK; - value |= RNP_IPC_MASK; /* open rx checksum */ - value &= (~RNP_DR_MASK); - value &= (~RNP_LUD_MASK); - value |= (RNP_BL_MODE << RNP_BL_OFFSET); - value &= (~RNP_DC_MASK); - value |= RNP_TE_MASK; - value |= (RNP_PRELEN_MODE); - /* not setup this if in ncsi mode */ - if (!hw->ncsi_en) - mac_wr32(mac, GMAC_CONTROL, value); - } - /* if ncsi on, sync hw status */ if (hw->ncsi_en) rnpgbe_mbx_phy_pause_get(hw, &hw->fc.requested_mode); else rnpgbe_mbx_phy_pause_set(hw, hw->fc.requested_mode); + rnpgbe_mbx_get_lane_stat(hw); + hw->link = 0; + return 0; } -static s32 rnpgbe_start_hw_ops_n500(struct rnpgbe_hw *hw) +static s32 rnpgbe_start_hw_ops(struct rnpgbe_hw *hw) { s32 ret_val = 0; struct rnpgbe_eth_info *eth = &hw->eth; struct rnpgbe_dma_info *dma = &hw->dma; /* ETH Registers */ - eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR, - RNP_N500_PKT_LEN_ERR | RNP_N500_HDR_LEN_ERR); + eth_wr32(eth, RNPGBE_ETH_ERR_MASK_VECTOR, 0); - eth_wr32(eth, RNP500_ETH_BYPASS, 0); - eth_wr32(eth, RNP500_ETH_DEFAULT_RX_RING, 0); + eth_wr32(eth, RNPGBE_ETH_BYPASS, 0); + eth_wr32(eth, RNPGBE_ETH_DEFAULT_RX_RING, 0); /* DMA common Registers */ dma_wr32(dma, RNP_DMA_CONFIG, DMA_VEB_BYPASS); @@ -1139,7 +1103,7 @@ static s32 rnpgbe_start_hw_ops_n500(struct rnpgbe_hw *hw) /* set n500 min/max packet according to new_mtu * we support mtu + 14 + 4 * 3 as max packet LENGTH_ERROR */ -static void rnpgbe_set_mtu_hw_ops_n500(struct rnpgbe_hw *hw, int new_mtu) +static void rnpgbe_set_mtu_hw_ops(struct rnpgbe_hw *hw, int new_mtu) { struct rnpgbe_eth_info *eth = &hw->eth; struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back; @@ -1159,14 +1123,18 @@ static void rnpgbe_set_mtu_hw_ops_n500(struct rnpgbe_hw *hw, int new_mtu) (adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL)) max = hw->max_length; + /* if ncsi not set max too small */ + if (hw->ncsi_en && max 
< 1522) + max = 1522; + hw->min_length_current = min; hw->max_length_current = max; eth->ops.set_min_max_packet(eth, min, max); } /* setup n500 vlan filter status */ -static void rnpgbe_set_vlan_filter_en_hw_ops_n500(struct rnpgbe_hw *hw, - bool status) +static void rnpgbe_set_vlan_filter_en_hw_ops(struct rnpgbe_hw *hw, + bool status) { struct rnpgbe_eth_info *eth = &hw->eth; @@ -1175,8 +1143,8 @@ static void rnpgbe_set_vlan_filter_en_hw_ops_n500(struct rnpgbe_hw *hw, /* set vlan to n500 vlan filter table & veb */ /* pf setup call */ -static void rnpgbe_set_vlan_filter_hw_ops_n500(struct rnpgbe_hw *hw, u16 vid, - bool enable, bool sriov_flag) +static void rnpgbe_set_vlan_filter_hw_ops(struct rnpgbe_hw *hw, u16 vid, + bool enable, bool sriov_flag) { struct rnpgbe_eth_info *eth = &hw->eth; struct rnpgbe_dma_info *dma = &hw->dma; @@ -1208,8 +1176,8 @@ static void rnpgbe_set_vlan_filter_hw_ops_n500(struct rnpgbe_hw *hw, u16 vid, } } -static int rnpgbe_set_veb_vlan_mask_hw_ops_n500(struct rnpgbe_hw *hw, u16 vid, - int vf, bool enable) +static int rnpgbe_set_veb_vlan_mask_hw_ops(struct rnpgbe_hw *hw, u16 vid, + int vf, bool enable) { struct list_head *pos; struct vf_vebvlans *entry; @@ -1263,9 +1231,9 @@ static int rnpgbe_set_veb_vlan_mask_hw_ops_n500(struct rnpgbe_hw *hw, u16 vid, return err; } -static void rnpgbe_set_vf_vlan_filter_hw_ops_n500(struct rnpgbe_hw *hw, u16 vid, - int vf, bool enable, - bool veb_only) +static void rnpgbe_set_vf_vlan_filter_hw_ops(struct rnpgbe_hw *hw, u16 vid, + int vf, bool enable, + bool veb_only) { struct rnpgbe_dma_info *dma = &hw->dma; @@ -1281,7 +1249,7 @@ static void rnpgbe_set_vf_vlan_filter_hw_ops_n500(struct rnpgbe_hw *hw, u16 vid, } } -static void rnpgbe_clr_vlan_veb_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_clr_vlan_veb_hw_ops(struct rnpgbe_hw *hw) { struct rnpgbe_dma_info *dma = &hw->dma; u32 vfnum = hw->vfnum; @@ -1290,8 +1258,8 @@ static void rnpgbe_clr_vlan_veb_hw_ops_n500(struct rnpgbe_hw *hw) } /* setup n500 
vlan strip status */ -static void rnpgbe_set_vlan_strip_hw_ops_n500(struct rnpgbe_hw *hw, u16 queue, - bool strip) +static void rnpgbe_set_vlan_strip_hw_ops(struct rnpgbe_hw *hw, u16 queue, + bool strip) { struct rnpgbe_eth_info *eth = &hw->eth; @@ -1299,8 +1267,8 @@ static void rnpgbe_set_vlan_strip_hw_ops_n500(struct rnpgbe_hw *hw, u16 queue, } /* update new n500 mac */ -static void rnpgbe_set_mac_hw_ops_n500(struct rnpgbe_hw *hw, u8 *mac, - bool sriov_flag) +static void rnpgbe_set_mac_hw_ops(struct rnpgbe_hw *hw, u8 *mac, + bool sriov_flag) { struct rnpgbe_eth_info *eth = &hw->eth; struct rnpgbe_dma_info *dma = &hw->dma; @@ -1320,7 +1288,7 @@ static void rnpgbe_set_mac_hw_ops_n500(struct rnpgbe_hw *hw, u8 *mac, } /** - * rnpgbe_write_uc_addr_list_n500 - write unicast addresses to RAR table + * rnpgbe_write_uc_addr_list - write unicast addresses to RAR table * @hw: hardware structure * @netdev: network interface device structure * @sriov_flag: sriov on or not @@ -1330,9 +1298,9 @@ static void rnpgbe_set_mac_hw_ops_n500(struct rnpgbe_hw *hw, u8 *mac, * 0 on no addresses written * X on writing X addresses to the RAR table **/ -static int rnpgbe_write_uc_addr_list_n500(struct rnpgbe_hw *hw, - struct net_device *netdev, - bool sriov_flag) +static int rnpgbe_write_uc_addr_list(struct rnpgbe_hw *hw, + struct net_device *netdev, + bool sriov_flag) { unsigned int rar_entries = hw->num_rar_entries - 1; u32 vfnum = hw->vfnum; @@ -1364,7 +1332,7 @@ static int rnpgbe_write_uc_addr_list_n500(struct rnpgbe_hw *hw, if (!rar_entries) break; eth->ops.set_rar(eth, rar_entries, ha->addr, - RNP500_RAH_AV); + RNPGBE_RAH_AV); if (sriov_flag) eth->ops.set_vmdq(eth, rar_entries, vfnum); @@ -1378,7 +1346,7 @@ static int rnpgbe_write_uc_addr_list_n500(struct rnpgbe_hw *hw, ret = hw->ops.get_ncsi_mac(hw, ncsi_addr, i); if (!ret) { eth->ops.set_rar(eth, NCSI_RAR_IDX_START + i, ncsi_addr, - RNP500_RAH_AV); + RNPGBE_RAH_AV); } } @@ -1390,31 +1358,30 @@ static int 
rnpgbe_write_uc_addr_list_n500(struct rnpgbe_hw *hw, return count; } -static void rnpgbe_set_rx_mode_hw_ops_n500(struct rnpgbe_hw *hw, - struct net_device *netdev, - bool sriov_flag) +static void rnpgbe_set_rx_mode_hw_ops(struct rnpgbe_hw *hw, + struct net_device *netdev, + bool sriov_flag) { - struct rnpgbe_adapter *adapter = netdev_priv(netdev); u32 fctrl; netdev_features_t features = netdev->features; int count; struct rnpgbe_eth_info *eth = &hw->eth; /* broadcast always bypass */ - fctrl = eth_rd32(eth, RNP500_ETH_DMAC_FCTRL) | RNP500_FCTRL_BPE; + fctrl = eth_rd32(eth, RNPGBE_ETH_DMAC_FCTRL) | RNPGBE_FCTRL_BPE; /* clear the bits we are changing the status of */ - fctrl &= ~(RNP500_FCTRL_UPE | RNP500_FCTRL_MPE); + fctrl &= ~(RNPGBE_FCTRL_UPE | RNPGBE_FCTRL_MPE); /* promisc mode */ if (netdev->flags & IFF_PROMISC) { hw->addr_ctrl.user_set_promisc = true; - fctrl |= (RNP500_FCTRL_UPE | RNP500_FCTRL_MPE); + fctrl |= (RNPGBE_FCTRL_UPE | RNPGBE_FCTRL_MPE); /* disable hardware filter vlans in promisc mode */ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; features &= ~NETIF_F_HW_VLAN_CTAG_RX; } else { if (netdev->flags & IFF_ALLMULTI) { - fctrl |= RNP500_FCTRL_MPE; + fctrl |= RNPGBE_FCTRL_MPE; } else { /* Write addresses to the MTA, if the attempt fails * then we should just turn on promiscuous mode so @@ -1423,7 +1390,7 @@ static void rnpgbe_set_rx_mode_hw_ops_n500(struct rnpgbe_hw *hw, /* we always update vf multicast info */ count = eth->ops.update_mc_addr_list(eth, netdev, true); if (count < 0) - fctrl |= RNP500_FCTRL_MPE; + fctrl |= RNPGBE_FCTRL_MPE; } hw->addr_ctrl.user_set_promisc = false; } @@ -1432,31 +1399,22 @@ static void rnpgbe_set_rx_mode_hw_ops_n500(struct rnpgbe_hw *hw, * sufficient space to store all the addresses then enable * unicast promiscuous mode */ - if (rnpgbe_write_uc_addr_list_n500(hw, netdev, sriov_flag) < 0) - fctrl |= RNP500_FCTRL_UPE; + if (rnpgbe_write_uc_addr_list(hw, netdev, sriov_flag) < 0) + fctrl |= RNPGBE_FCTRL_UPE; - eth_wr32(eth, 
RNP500_ETH_DMAC_FCTRL, fctrl); + eth_wr32(eth, RNPGBE_ETH_DMAC_FCTRL, fctrl); if (features & NETIF_F_HW_VLAN_CTAG_FILTER) eth->ops.set_vlan_filter(eth, true); else eth->ops.set_vlan_filter(eth, false); - if (hw->addr_ctrl.user_set_promisc || - adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR) { - /* set pkt_len_err and hdr_len_err default to 1 */ - eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR, - PKT_LEN_ERR | HDR_LEN_ERR); - } else { - eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR, 0); - } - /* update mtu */ hw->ops.set_mtu(hw, netdev->mtu); } /* setup an rar with vfnum */ -static void rnpgbe_set_rar_with_vf_hw_ops_n500(struct rnpgbe_hw *hw, u8 *mac, - int idx, u32 vfnum, bool enable) +static void rnpgbe_set_rar_with_vf_hw_ops(struct rnpgbe_hw *hw, u8 *mac, + int idx, u32 vfnum, bool enable) { struct rnpgbe_eth_info *eth = &hw->eth; @@ -1465,14 +1423,14 @@ static void rnpgbe_set_rar_with_vf_hw_ops_n500(struct rnpgbe_hw *hw, u8 *mac, eth->ops.set_vmdq(eth, idx, vfnum); } -static void rnpgbe_clr_rar_hw_ops_n500(struct rnpgbe_hw *hw, int idx) +static void rnpgbe_clr_rar_hw_ops(struct rnpgbe_hw *hw, int idx) { struct rnpgbe_eth_info *eth = &hw->eth; eth->ops.clear_rar(eth, idx); } -static void rnpgbe_clr_rar_all_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_clr_rar_all_hw_ops(struct rnpgbe_hw *hw) { struct rnpgbe_eth_info *eth = &hw->eth; unsigned int rar_entries = hw->num_rar_entries - 1; @@ -1482,7 +1440,7 @@ static void rnpgbe_clr_rar_all_hw_ops_n500(struct rnpgbe_hw *hw) eth->ops.clear_rar(eth, rar_entries); } -static void rnpgbe_set_fcs_mode_hw_ops_n500(struct rnpgbe_hw *hw, bool status) +static void rnpgbe_set_fcs_mode_hw_ops(struct rnpgbe_hw *hw, bool status) { struct rnpgbe_mac_info *mac = &hw->mac; struct rnpgbe_eth_info *eth = &hw->eth; @@ -1491,11 +1449,14 @@ static void rnpgbe_set_fcs_mode_hw_ops_n500(struct rnpgbe_hw *hw, bool status) eth->ops.set_fcs(eth, status); } -static void rnpgbe_set_mac_rx_hw_ops_n500(struct rnpgbe_hw *hw, bool status) +static 
void rnpgbe_set_mac_rx_hw_ops(struct rnpgbe_hw *hw, bool status) { struct rnpgbe_eth_info *eth = &hw->eth; struct rnpgbe_mac_info *mac = &hw->mac; + if (pci_channel_offline(hw->pdev)) + return; + if (status) { mac->ops.set_mac_rx(mac, status); eth->ops.set_rx(eth, status); @@ -1505,37 +1466,37 @@ static void rnpgbe_set_mac_rx_hw_ops_n500(struct rnpgbe_hw *hw, bool status) } } -static void rnpgbe_set_sriov_status_hw_ops_n500(struct rnpgbe_hw *hw, - bool status) +static void rnpgbe_set_sriov_status_hw_ops(struct rnpgbe_hw *hw, + bool status) { struct rnpgbe_dma_info *dma = &hw->dma; struct rnpgbe_eth_info *eth = &hw->eth; u32 v, fctrl; - fctrl = eth_rd32(eth, RNP500_ETH_DMAC_FCTRL); -#define RNP500_DMAC_MASK (0x7f) - fctrl &= ~RNP500_DMAC_MASK; + fctrl = eth_rd32(eth, RNPGBE_ETH_DMAC_FCTRL); +#define RNPGBE_DMAC_MASK (0x7f) + fctrl &= ~RNPGBE_DMAC_MASK; if (status) { fctrl |= hw->veb_ring; - eth_wr32(eth, RNP500_ETH_DMAC_FCTRL, fctrl); + eth_wr32(eth, RNPGBE_ETH_DMAC_FCTRL, fctrl); /* setup default ring */ dma_wr32(dma, RNP_DMA_CONFIG, dma_rd32(dma, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS)); - v = eth_rd32(eth, RNP500_MRQC_IOV_EN); - v |= RNP500_IOV_ENABLED; - eth_wr32(eth, RNP500_MRQC_IOV_EN, v); + v = eth_rd32(eth, RNPGBE_MRQC_IOV_EN); + v |= RNPGBE_IOV_ENABLED; + eth_wr32(eth, RNPGBE_MRQC_IOV_EN, v); } else { - eth_wr32(eth, RNP500_ETH_DMAC_FCTRL, fctrl); - v = eth_rd32(eth, RNP500_MRQC_IOV_EN); - v &= ~(RNP500_IOV_ENABLED); - eth_wr32(eth, RNP500_MRQC_IOV_EN, v); + eth_wr32(eth, RNPGBE_ETH_DMAC_FCTRL, fctrl); + v = eth_rd32(eth, RNPGBE_MRQC_IOV_EN); + v &= ~(RNPGBE_IOV_ENABLED); + eth_wr32(eth, RNPGBE_MRQC_IOV_EN, v); dma->ops.clr_veb_all(dma); } } -static void rnpgbe_set_sriov_vf_mc_hw_ops_n500(struct rnpgbe_hw *hw, - u16 mc_addr) +static void rnpgbe_set_sriov_vf_mc_hw_ops(struct rnpgbe_hw *hw, + u16 mc_addr) { struct rnpgbe_eth_info *eth = &hw->eth; u32 vector_bit; @@ -1545,17 +1506,17 @@ static void rnpgbe_set_sriov_vf_mc_hw_ops_n500(struct rnpgbe_hw *hw, 
vector_reg = (mc_addr >> 5) & 0x7F; vector_bit = mc_addr & 0x1F; - mta_reg = eth_rd32(eth, RNP500_ETH_MUTICAST_HASH_TABLE(vector_reg)); + mta_reg = eth_rd32(eth, RNPGBE_ETH_MUTICAST_HASH_TABLE(vector_reg)); mta_reg |= (1 << vector_bit); - eth_wr32(eth, RNP500_ETH_MUTICAST_HASH_TABLE(vector_reg), mta_reg); + eth_wr32(eth, RNPGBE_ETH_MUTICAST_HASH_TABLE(vector_reg), mta_reg); } -static void rnpgbe_update_sriov_info_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_update_sriov_info_hw_ops(struct rnpgbe_hw *hw) { /* update sriov info to hw */ } -static void rnpgbe_set_pause_mode_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_set_pause_mode_hw_ops(struct rnpgbe_hw *hw) { struct rnpgbe_mac_info *mac = &hw->mac; struct rnpgbe_eth_info *eth = &hw->eth; @@ -1564,12 +1525,12 @@ static void rnpgbe_set_pause_mode_hw_ops_n500(struct rnpgbe_hw *hw) eth->ops.set_fc_mode(eth); } -static void rnpgbe_get_pause_mode_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_get_pause_mode_hw_ops(struct rnpgbe_hw *hw) { // n500 can get pause mode in link event } -static void rnpgbe_update_hw_info_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_update_hw_info_hw_ops(struct rnpgbe_hw *hw) { struct rnpgbe_dma_info *dma = &hw->dma; struct rnpgbe_eth_info *eth = &hw->eth; @@ -1577,13 +1538,13 @@ static void rnpgbe_update_hw_info_hw_ops_n500(struct rnpgbe_hw *hw) struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back; u32 data; /* 1 enable eth filter */ - eth_wr32(eth, RNP500_HOST_FILTER_EN, 1); + eth_wr32(eth, RNPGBE_HOST_FILTER_EN, 1); /* 2 open redir en */ - eth_wr32(eth, RNP500_REDIR_EN, 1); + eth_wr32(eth, RNPGBE_REDIR_EN, 1); /* 3 open sctp checksum and other checksum */ if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM) - eth_wr32(eth, RNP500_ETH_SCTP_CHECKSUM_EN, 1); + eth_wr32(eth, RNPGBE_ETH_SCTP_CHECKSUM_EN, 1); /* 4 mark muticaset as broadcast */ dma_wr32(dma, RNP_VEB_MAC_MASK_LO, 0xffffffff); @@ -1631,33 +1592,46 @@ static void 
rnpgbe_update_hw_info_hw_ops_n500(struct rnpgbe_hw *hw) mac_wr32(mac, GMAC_FLOW_CTRL, data); /* 11 open tx double vlan according to stags */ - eth_wr32(eth, RNP500_ETH_TX_VLAN_CONTROL_EANBLE, 1); + eth_wr32(eth, RNPGBE_ETH_TX_VLAN_CONTROL_EANBLE, 1); /* 12 test */ - eth_wr32(eth, RNP500_ETH_WHOLE_PKT_LEN_ERR_DROP, 1); + eth_wr32(eth, RNPGBE_ETH_WHOLE_PKT_LEN_ERR_DROP, 1); /* 13 setup double vlan drop */ if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE) - eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 0); + eth_wr32(eth, RNPGBE_ETH_DOUBLE_VLAN_DROP, 0); else - eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 1); + eth_wr32(eth, RNPGBE_ETH_DOUBLE_VLAN_DROP, 1); /* 14 open error mask if in rx all mode */ if (adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL) { - eth_wr32(eth, RNP500_MAC_ERR_MASK, + eth_wr32(eth, RNPGBE_MAC_ERR_MASK, RUN_FRAME_ERROR | GAINT_FRAME_ERROR | CRC_ERROR | LENGTH_ERROR); - eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 0); + eth_wr32(eth, RNPGBE_ETH_DOUBLE_VLAN_DROP, 0); #define FORWARD_ALL_CONTROL (0x2) + eth_wr32(eth, RNPGBE_BAD_PACKETS_RECEIVE_EN, 1); mac_wr32(mac, GMAC_FRAME_FILTER, 0x00000001 | (FORWARD_ALL_CONTROL << 6)); } else { - eth_wr32(eth, RNP500_MAC_ERR_MASK, + eth_wr32(eth, RNPGBE_MAC_ERR_MASK, RUN_FRAME_ERROR | GAINT_FRAME_ERROR); + eth_wr32(eth, RNPGBE_BAD_PACKETS_RECEIVE_EN, 0); mac_wr32(mac, GMAC_FRAME_FILTER, 0x00000001); } + /* setup eth_err_mask */ + if ((adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL) || + (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR)) { + /* set pkt_len_err and hdr_len_err default to 1 */ + eth_wr32(eth, RNPGBE_ETH_ERR_MASK_VECTOR, + PKT_LEN_ERR | HDR_LEN_ERR); + } else { + /* set 0 to drop in default */ + eth_wr32(eth, RNPGBE_ETH_ERR_MASK_VECTOR, 0); + } + /* 15 update water acoording to max length */ { #define FIFO_ALL (1024) @@ -1668,10 +1642,10 @@ static void rnpgbe_update_hw_info_hw_ops_n500(struct rnpgbe_hw *hw) hw->fc.high_water[0] = water_high; hw->fc.low_water[0] = water_high; - dma_wr32(dma, 
RNP500_DMA_RBUF_FIFO, + dma_wr32(dma, RNPGBE_DMA_RBUF_FIFO, ((hw->max_length_current + 15) >> 4) + 5); - eth_wr32(eth, RNP500_ETH_EMAC_PARSE_PROGFULL_THRESH, + eth_wr32(eth, RNPGBE_ETH_EMAC_PARSE_PROGFULL_THRESH, ((hw->max_length_current + 15) >> 4) + 2); } /* 16 setup fcs mode */ @@ -1688,30 +1662,30 @@ static void rnpgbe_update_hw_info_hw_ops_n500(struct rnpgbe_hw *hw) data = PRIV_DATA_EN | adapter->priv_skip_count; else data = 0; - eth_wr32(eth, RNP500_ETH_PRIV_DATA_CONTROL_REG, data); + eth_wr32(eth, RNPGBE_ETH_PRIV_DATA_CONTROL_REG, data); /* 19 setup mac count read self clear */ - data = mac_rd32(mac, RNP500_MAC_COUNT_CONTROL); + data = mac_rd32(mac, RNPGBE_MAC_COUNT_CONTROL); #define READ_CLEAR BIT(2) data |= READ_CLEAR; - mac_wr32(mac, RNP500_MAC_COUNT_CONTROL, data); + mac_wr32(mac, RNPGBE_MAC_COUNT_CONTROL, data); /* 20 setup prio */ if (adapter->priv_flags & (RNP_PRIV_FLAG_8023_PRIO | RNP_PRIV_FLAG_REMAP_PRIO)) { - eth_wr32(eth, RNP500_PRIORITY_1_MARK, RNP500_PRIORITY_1); - eth_wr32(eth, RNP500_PRIORITY_0_MARK, RNP500_PRIORITY_0); - eth_wr32(eth, RNP500_PRIORITY_EN, 1); + eth_wr32(eth, RNPGBE_PRIORITY_1_MARK, RNPGBE_PRIORITY_1); + eth_wr32(eth, RNPGBE_PRIORITY_0_MARK, RNPGBE_PRIORITY_0); + eth_wr32(eth, RNPGBE_PRIORITY_EN, 1); if (adapter->priv_flags & RNP_PRIV_FLAG_8023_PRIO) - eth_wr32(eth, RNP500_PRIORITY_EN_8023, 1); + eth_wr32(eth, RNPGBE_PRIORITY_EN_8023, 1); else - eth_wr32(eth, RNP500_PRIORITY_EN_8023, 0); + eth_wr32(eth, RNPGBE_PRIORITY_EN_8023, 0); } else { - eth_wr32(eth, RNP500_PRIORITY_EN, 0); + eth_wr32(eth, RNPGBE_PRIORITY_EN, 0); } } -static void rnpgbe_update_hw_rx_drop_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_update_hw_rx_drop_hw_ops(struct rnpgbe_hw *hw) { struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back; int i; @@ -1728,15 +1702,15 @@ static void rnpgbe_update_hw_rx_drop_hw_ops_n500(struct rnpgbe_hw *hw) } } -static void rnpgbe_set_rx_hash_hw_ops_n500(struct rnpgbe_hw *hw, bool status, - bool sriov_flag) 
+static void rnpgbe_set_rx_hash_hw_ops(struct rnpgbe_hw *hw, bool status, + bool sriov_flag) { struct rnpgbe_eth_info *eth = &hw->eth; eth->ops.set_rx_hash(eth, status, sriov_flag); } -static s32 rnpgbe_init_rx_addrs_hw_ops_n500(struct rnpgbe_hw *hw) +static s32 rnpgbe_init_rx_addrs_hw_ops(struct rnpgbe_hw *hw) { struct rnpgbe_eth_info *eth = &hw->eth; @@ -1777,10 +1751,10 @@ static s32 rnpgbe_init_rx_addrs_hw_ops_n500(struct rnpgbe_hw *hw) /* Clear the MTA */ hw->addr_ctrl.mta_in_use = 0; - v = eth_rd32(eth, RNP500_ETH_DMAC_MCSTCTRL); + v = eth_rd32(eth, RNPGBE_ETH_DMAC_MCSTCTRL); v &= (~0x3); v |= eth->mc_filter_type; - eth_wr32(eth, RNP500_ETH_DMAC_MCSTCTRL, v); + eth_wr32(eth, RNPGBE_ETH_DMAC_MCSTCTRL, v); hw_dbg(hw, " Clearing MTA\n"); if (!hw->ncsi_en) @@ -1790,18 +1764,18 @@ static s32 rnpgbe_init_rx_addrs_hw_ops_n500(struct rnpgbe_hw *hw) } /* clean vlan filter tables */ -static void rnpgbe_clr_vfta_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_clr_vfta_hw_ops(struct rnpgbe_hw *hw) { struct rnpgbe_eth_info *eth = &hw->eth; eth->ops.clr_vfta(eth); } -static void rnpgbe_set_txvlan_mode_hw_ops_n500(struct rnpgbe_hw *hw, bool cvlan) +static void rnpgbe_set_txvlan_mode_hw_ops(struct rnpgbe_hw *hw, bool cvlan) { } -static int rnpgbe_set_rss_hfunc_hw_ops_n500(struct rnpgbe_hw *hw, u8 hfunc) +static int rnpgbe_set_rss_hfunc_hw_ops(struct rnpgbe_hw *hw, u8 hfunc) { struct rnpgbe_eth_info *eth = &hw->eth; struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back; @@ -1823,8 +1797,8 @@ static int rnpgbe_set_rss_hfunc_hw_ops_n500(struct rnpgbe_hw *hw, u8 hfunc) return 0; } -static void rnpgbe_set_rss_key_hw_ops_n500(struct rnpgbe_hw *hw, - bool sriov_flag) +static void rnpgbe_set_rss_key_hw_ops(struct rnpgbe_hw *hw, + bool sriov_flag) { struct rnpgbe_eth_info *eth = &hw->eth; struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back; @@ -1835,26 +1809,26 @@ static void rnpgbe_set_rss_key_hw_ops_n500(struct rnpgbe_hw *hw, 
eth->ops.set_rss_key(eth, sriov_flag); } -static void rnpgbe_set_rss_table_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_set_rss_table_hw_ops(struct rnpgbe_hw *hw) { struct rnpgbe_eth_info *eth = &hw->eth; eth->ops.set_rss_table(eth); } -static void rnpgbe_set_mbx_link_event_hw_ops_n500(struct rnpgbe_hw *hw, - int enable) +static void rnpgbe_set_mbx_link_event_hw_ops(struct rnpgbe_hw *hw, + int enable) { rnpgbe_mbx_link_event_enable(hw, enable); } -static void rnpgbe_set_mbx_ifup_hw_ops_n500(struct rnpgbe_hw *hw, int enable) +static void rnpgbe_set_mbx_ifup_hw_ops(struct rnpgbe_hw *hw, int enable) { rnpgbe_mbx_ifup_down(hw, enable); } /** - * rnpgbe_check_mac_link_hw_ops_n500 - Determine link and speed status + * rnpgbe_check_mac_link_hw_ops - Determine link and speed status * @hw: pointer to hardware structure * @speed: pointer to link speed * @link_up: true when link is up @@ -1863,11 +1837,11 @@ static void rnpgbe_set_mbx_ifup_hw_ops_n500(struct rnpgbe_hw *hw, int enable) * * Reads the links register to determine if link is up and the current speed **/ -static s32 rnpgbe_check_mac_link_hw_ops_n500(struct rnpgbe_hw *hw, - rnpgbe_link_speed *speed, - bool *link_up, - bool *duplex, - bool link_up_wait_to_complete) +static s32 rnpgbe_check_mac_link_hw_ops(struct rnpgbe_hw *hw, + rnpgbe_link_speed *speed, + bool *link_up, + bool *duplex, + bool link_up_wait_to_complete) { struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back; @@ -1896,11 +1870,11 @@ static s32 rnpgbe_check_mac_link_hw_ops_n500(struct rnpgbe_hw *hw, return 0; } -static s32 rnpgbe_setup_mac_link_hw_ops_n500(struct rnpgbe_hw *hw, - u32 adv, - u32 autoneg, - u32 speed, - u32 duplex) +static s32 rnpgbe_setup_mac_link_hw_ops(struct rnpgbe_hw *hw, + u32 adv, + u32 autoneg, + u32 speed, + u32 duplex) { rnpgbe_mbx_phy_link_set(hw, adv, autoneg, speed, duplex, hw->tp_mdix_ctrl); @@ -1908,69 +1882,104 @@ static s32 rnpgbe_setup_mac_link_hw_ops_n500(struct rnpgbe_hw *hw, return 0; } -static 
void rnpgbe_clean_link_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_clean_link_hw_ops(struct rnpgbe_hw *hw) { hw->link = 0; } -static void rnpgbe_set_layer2_hw_ops_n500(struct rnpgbe_hw *hw, - union rnpgbe_atr_input *input, - u16 pri_id, u8 queue, bool prio_flag) +static void rnpgbe_set_layer2_hw_ops(struct rnpgbe_hw *hw, + union rnpgbe_atr_input *input, + u16 pri_id, u8 queue, bool prio_flag) { struct rnpgbe_eth_info *eth = &hw->eth; eth->ops.set_layer2_remapping(eth, input, pri_id, queue, prio_flag); } -static void rnpgbe_clr_layer2_hw_ops_n500(struct rnpgbe_hw *hw, u16 pri_id) +static void rnpgbe_clr_layer2_hw_ops(struct rnpgbe_hw *hw, u16 pri_id) { struct rnpgbe_eth_info *eth = &hw->eth; eth->ops.clr_layer2_remapping(eth, pri_id); } -static void rnpgbe_clr_all_layer2_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_clr_all_layer2_hw_ops(struct rnpgbe_hw *hw) { struct rnpgbe_eth_info *eth = &hw->eth; eth->ops.clr_all_layer2_remapping(eth); } -static void rnpgbe_clr_all_tuple5_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_clr_all_tuple5_hw_ops(struct rnpgbe_hw *hw) { struct rnpgbe_eth_info *eth = &hw->eth; eth->ops.clr_all_tuple5_remapping(eth); } -static void rnpgbe_set_tcp_sync_hw_ops_n500(struct rnpgbe_hw *hw, int queue, - bool flag, bool prio) +static void rnpgbe_set_tcp_sync_hw_ops(struct rnpgbe_hw *hw, int queue, + bool flag, bool prio) { struct rnpgbe_eth_info *eth = &hw->eth; eth->ops.set_tcp_sync_remapping(eth, queue, flag, prio); } -static void rnpgbe_set_rx_skip_hw_ops_n500(struct rnpgbe_hw *hw, int count, - bool flag) +static void rnpgbe_set_rx_skip_hw_ops(struct rnpgbe_hw *hw, int count, + bool flag) { struct rnpgbe_eth_info *eth = &hw->eth; eth->ops.set_rx_skip(eth, count, flag); } -static void rnpgbe_set_outer_vlan_type_hw_ops_n500(struct rnpgbe_hw *hw, - int type) +static void rnpgbe_set_outer_vlan_type_hw_ops(struct rnpgbe_hw *hw, + int type) { struct rnpgbe_eth_info *eth = &hw->eth; eth->ops.set_outer_vlan_type(eth, type); 
} -static s32 rnpgbe_phy_read_reg_hw_ops_n500(struct rnpgbe_hw *hw, - u32 reg_addr, - u32 device_type, - u16 *phy_data) +/** + * rnpgbe_get_thermal_sensor_data_hw_ops - Gathers thermal sensor data + * @hw: pointer to hardware structure + * Returns the thermal sensor data structure + **/ +static s32 rnpgbe_get_thermal_sensor_data_hw_ops(struct rnpgbe_hw *hw) +{ + struct rnpgbe_thermal_sensor_data *data = &hw->thermal_sensor_data; + int voltage = 0; + + data->sensor[0].temp = rnpgbe_mbx_get_temp(hw, &voltage); + + return 0; +} + +/** + * rnpgbe_init_thermal_sensor_thresh_hw_ops - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +static s32 rnpgbe_init_thermal_sensor_thresh_hw_ops(struct rnpgbe_hw *hw) +{ + struct rnpgbe_thermal_sensor_data *data = &hw->thermal_sensor_data; + u8 i; + + for (i = 0; i < RNPGBE_MAX_SENSORS; i++) { + data->sensor[i].location = i + 1; + data->sensor[i].caution_thresh = 90; + data->sensor[i].max_op_thresh = 100; + } + + return 0; +} + +static s32 rnpgbe_phy_read_reg_hw_ops(struct rnpgbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 *phy_data) { struct rnpgbe_mac_info *mac = &hw->mac; s32 status = 0; @@ -1982,10 +1991,10 @@ static s32 rnpgbe_phy_read_reg_hw_ops_n500(struct rnpgbe_hw *hw, return status; } -static s32 rnpgbe_phy_write_reg_hw_ops_n500(struct rnpgbe_hw *hw, - u32 reg_addr, - u32 device_type, - u16 phy_data) +static s32 rnpgbe_phy_write_reg_hw_ops(struct rnpgbe_hw *hw, + u32 reg_addr, + u32 device_type, + u16 phy_data) { struct rnpgbe_mac_info *mac = &hw->mac; s32 status = 0; @@ -1995,16 +2004,16 @@ static s32 rnpgbe_phy_write_reg_hw_ops_n500(struct rnpgbe_hw *hw, return status; } -static void rnpgbe_setup_wol_hw_ops_n500(struct rnpgbe_hw *hw, u32 mode) +static void rnpgbe_setup_wol_hw_ops(struct rnpgbe_hw *hw, u32 mode) { struct rnpgbe_mac_info *mac = 
&hw->mac; mac->ops.pmt(mac, mode, !!hw->ncsi_en); } -static void rnpgbe_setup_eee_hw_ops_n500(struct rnpgbe_hw *hw, - int ls, int tw, - u32 local_eee) +static void rnpgbe_setup_eee_hw_ops(struct rnpgbe_hw *hw, + int ls, int tw, + u32 local_eee) { struct rnpgbe_mac_info *mac = &hw->mac; @@ -2014,37 +2023,37 @@ static void rnpgbe_setup_eee_hw_ops_n500(struct rnpgbe_hw *hw, rnpgbe_mbx_phy_eee_set(hw, tw, local_eee); } -static void rnpgbe_set_eee_mode_hw_ops_n500(struct rnpgbe_hw *hw, - bool en_tx_lpi_clockgating) +static void rnpgbe_set_eee_mode_hw_ops(struct rnpgbe_hw *hw, + bool en_tx_lpi_clockgating) { struct rnpgbe_mac_info *mac = &hw->mac; mac->ops.set_eee_mode(mac, en_tx_lpi_clockgating); } -static void rnpgbe_reset_eee_mode_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_reset_eee_mode_hw_ops(struct rnpgbe_hw *hw) { struct rnpgbe_mac_info *mac = &hw->mac; mac->ops.reset_eee_mode(mac); } -static void rnpgbe_set_eee_pls_hw_ops_n500(struct rnpgbe_hw *hw, int link) +static void rnpgbe_set_eee_pls_hw_ops(struct rnpgbe_hw *hw, int link) { struct rnpgbe_mac_info *mac = &hw->mac; mac->ops.set_eee_pls(mac, link); } -static u32 rnpgbe_get_lpi_status_hw_ops_n500(struct rnpgbe_hw *hw) +static u32 rnpgbe_get_lpi_status_hw_ops(struct rnpgbe_hw *hw) { struct rnpgbe_mac_info *mac = &hw->mac; return mac->ops.get_lpi_status(mac); } -static int rnpgbe_get_ncsi_mac_hw_ops_n500(struct rnpgbe_hw *hw, - u8 *addr, int idx) +static int rnpgbe_get_ncsi_mac_hw_ops(struct rnpgbe_hw *hw, + u8 *addr, int idx) { #define NCSI_MAC_H(i) (0x48 + (i) * 0x8) #define NCSI_MAC_L(i) (0x4C + (i) * 0x8) @@ -2067,8 +2076,8 @@ static int rnpgbe_get_ncsi_mac_hw_ops_n500(struct rnpgbe_hw *hw, } } -static int rnpgbe_get_ncsi_vlan_hw_ops_n500(struct rnpgbe_hw *hw, - u16 *vlan, int idx) +static int rnpgbe_get_ncsi_vlan_hw_ops(struct rnpgbe_hw *hw, + u16 *vlan, int idx) { #define NCSI_VLAN(i) (0x80 + (i) * 0x10) struct rnpgbe_mac_info *mac = &hw->mac; @@ -2084,26 +2093,333 @@ static int 
rnpgbe_get_ncsi_vlan_hw_ops_n500(struct rnpgbe_hw *hw, } } -static void rnpgbe_set_lldp_hw_ops_n500(struct rnpgbe_hw *hw, bool enable) +static void rnpgbe_dump_rings_regs(struct rnpgbe_hw *hw) +{ + struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back; + struct net_device *netdev = adapter->netdev; + struct device *dev = &hw->pdev->dev; + struct rnpgbe_ring *ring; + u32 head = 0; + u32 tail = 0; + int i; + + for (i = 0; i < RNP_NUM_TX_QUEUES; i++) { + ring = adapter->tx_ring[i]; + head = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD), + tail = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_TAIL), + dev_info(dev, "\tTxq-%-3u (0x%08x)-head : (%4u),\t", + ring->rnpgbe_queue_idx, + RNPGBE_RING_BASE + RING_OFFSET(ring->rnpgbe_queue_idx) + + RNP_DMA_REG_TX_DESC_BUF_HEAD, head); + dev_info(dev, "(0x%08x)-tail : (%4u),\t ntu : (%4u),\t ntc : (%4u),\t\n", + RNPGBE_RING_BASE + RING_OFFSET(ring->rnpgbe_queue_idx) + + RNP_DMA_REG_TX_DESC_BUF_TAIL, + tail, + ring->next_to_use, + ring->next_to_clean); + } + + for (i = 0; i < RNP_NUM_RX_QUEUES; i++) { + ring = adapter->rx_ring[i]; + head = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD), + tail = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_TAIL), + dev_info(dev, "\tTxq-%-3u (0x%08x)-head : (%4u),\t", + ring->rnpgbe_queue_idx, + RNPGBE_RING_BASE + RING_OFFSET(ring->rnpgbe_queue_idx) + + RNP_DMA_REG_RX_DESC_BUF_HEAD, + head); + + dev_info(dev, "(0x%08x)-tail : (%4u),\t ntu : (%4u),\t ntc : (%4u)\t\n", + RNPGBE_RING_BASE + RING_OFFSET(ring->rnpgbe_queue_idx) + + RNP_DMA_REG_RX_DESC_BUF_TAIL, + tail, + ring->next_to_use, + ring->next_to_clean); + } +} + +static const struct rnpgbe_debug_reg tx_debug_reg_eth[] = { + {"3to1(in from host)", RNPGBE_ETH_3TO1_HOST}, + {"3to1(in from sw)", RNPGBE_ETH_3TO1_SW}, + {"3to1(in from bmc)", RNPGBE_ETH_3TO1_BMC}, + {"3to1(out all)", RNPGBE_ETH_3TO1_OUT}, + {"out (multiple)", RNPGBE_ETH_OUT_MULTIPLE}, + {"out (broadcast)", RNPGBE_ETH_OUT_BROADCAST}, + {"out (ptp)", RNPGBE_ETH_OUT_PTP}, + {"out 
(drop)", RNPGBE_ETH_OUT_DROP}, + {"tx_trans", RNPGBE_ETH_TX_TRANS}, + {"tx_trans_status_0", RNPGBE_ETH_TX_TRANS_STATUS_0}, + {"tx_trans_status_1", RNPGBE_ETH_TX_TRANS_STATUS_1}, + {"tx_trans_sop", RNPGBE_ETH_TX_TRANS_SOP}, + {"tx_trans_eop", RNPGBE_ETH_TX_TRANS_EOP} +}; + +static const struct rnpgbe_debug_reg tx_debug_reg_mac[] = { + {"mac_status", 0} +}; + +static void rnpgbe_dump_tx_regs(struct rnpgbe_hw *hw) +{ + struct rnpgbe_eth_info *eth = &hw->eth; + struct rnpgbe_mac_info *mac = &hw->mac; + struct device *dev = &hw->pdev->dev; + u32 value; + int i; + + /* eth */ + for (i = 0; i < sizeof(tx_debug_reg_eth) / sizeof(struct rnpgbe_debug_reg); i++) { + value = eth_rd32(eth, tx_debug_reg_eth[i].offset); + dev_info(dev, "\t%s \t:0x%08x(%4u)", + tx_debug_reg_eth[i].name, + value, value); + } + + /* mac */ + for (i = 0; i < sizeof(tx_debug_reg_mac) / sizeof(struct rnpgbe_debug_reg); i++) { + value = mac_rd32(mac, tx_debug_reg_mac[i].offset); + dev_info(dev, "\t%s \t:0x%08x(%4u)", + tx_debug_reg_mac[i].name, + value, value); + } +} + +static const struct rnpgbe_debug_reg rx_debug_reg_trans[] = { + {"pkts-in", RNPGBE_ETH_PKTS_IN}, + {"pkts-out", RNPGBE_ETH_PKTS_OUT}, + {"pkts-drop(fifo full or abnormal)", RNPGBE_ETH_PKTS_DRIP}, + {"pkts-in(ethII)", RNPGBE_ETH_PKTS_IN_ETH2}, + {"pkts-in(802.3)", RNPGBE_ETH_PKTS_IN_8023}, + {"pkts-in(control)", RNPGBE_ETH_PKTS_IN_CONTROL}, + {"pkts-in(udp)", RNPGBE_ETH_PKTS_IN_UDP}, + {"pkts-in(tcp)", RNPGBE_ETH_PKTS_IN_TCP}, + {"pkts-in(icmp)", RNPGBE_ETH_PKTS_IN_ICMP}, + {"pkts-in(lcs_err)", RNPGBE_ETH_PKTS_IN_LCS_ERR}, + {"pkts-in(len_err)", RNPGBE_ETH_PKTS_IN_LEN_ERR}, + {"pkts-in(dmac_failed)", RNPGBE_ETH_PKTS_IN_DMAC_F}, + {"pkts-in(smac_failed)", RNPGBE_ETH_PKTS_IN_SMAC_F}, + {"pkts-in(slen_err)", RNPGBE_ETH_PKTS_IN_SLEN_ERR}, + {"pkts-in(glen_err)", RNPGBE_ETH_PKTS_IN_GLEN_ERR}, + {"pkts-in(iph_err)", RNPGBE_ETH_PKTS_IN_IPH_ERR}, + {"pkts-in(payload_err)", RNPGBE_ETH_PKTS_IN_PAYLOAD_ERR}, + {"pkts-in(ipv4)", 
RNPGBE_ETH_PKTS_IN_IPV4}, + {"pkts-in(ipv6)", RNPGBE_ETH_PKTS_IN_IPV6}, + {"pkts-in(cut_err)", RNPGBE_ETH_PKTS_IN_CUT_ERR}, + {"pkts-in(except_bytes)", RNPGBE_ETH_PKTS_IN_EXCEPT_BYTES}, + {"pkts-in(fcs_err)", RNPGBE_ETH_PKTS_IN_FCS_ERR}, + {"pkts-in(mac_len_err)", RNPGBE_ETH_PKTS_IN_MAC_LEN_ERR} +}; + +static const struct rnpgbe_debug_reg rx_debug_reg_gather[] = { + {"pkts-in", RNPGBE_GATHER_PKTS_IN}, + {"pkts-out", RNPGBE_GATHER_PKTS_OUT}, + {"pkts-out(mutiplecast)", RNPGBE_GATHER_PKTS_OUT_MUL}, + {"pkts-out(broadcast)", RNPGBE_GATHER_PKTS_OUT_BRO}, + {"pkts(drop)", RNPGBE_GATHER_PKTS_IN_DROP}, + {"pkts-in(mac_cut)", RNPGBE_GATHER_PKTS_IN_MAC_CUT}, + {"pkts-in(mac_lcs_err)", RNPGBE_GATHER_PKTS_IN_MAC_LCS_ERR}, + {"pkts-in(mac_len_err)", RNPGBE_GATHER_PKTS_IN_MAC_LEN_ERR}, + {"pkts-in(mac_slen_err)", RNPGBE_GATHER_PKTS_IN_MAC_SLEN_ERR}, + {"pkts-in(mac_glen_err)", RNPGBE_GATHER_PKTS_IN_MAC_GLEN_ERR}, + {"pkts-in(mac_fcs_err)", RNPGBE_GATHER_PKTS_IN_MAC_FCS_ERR}, + {"pkts-in(mac<64byts fcs_err)", RNPGBE_GATHER_PKTS_IN_SMALL_64}, + {"pkts-in(mac>=64byts fcs_err)", RNPGBE_GATHER_PKTS_IN_LARGE_64} +}; + +static const struct rnpgbe_debug_reg rx_debug_reg_pip_parse[] = { + {"pkts-in", RNPGBE_PARSE_PKTS_IN}, + {"pkts-out", RNPGBE_PARSE_PKTS_OUT}, + {"pkts(arp request)", RNPGBE_PARSE_PKTS_ARP_REQUEST}, + {"pkts(arp response)", RNPGBE_PARSE_PKTS_ARP_RESPONS}, + {"pkts(icmp)", RNPGBE_PARSE_PKTS_ICMP}, + {"pkts(udp)", RNPGBE_PARSE_PKTS_UDP}, + {"pkts(tcp)", RNPGBE_PARSE_PKTS_TCP}, + {"pkts(arp cut)", RNPGBE_PARSE_PKTS_ARP_CUT}, + {"pkts(ND_CUT)", RNPGBE_PARSE_PKTS_ND_CUT}, + {"pkts(sctp)", RNPGBE_PARSE_PKTS_SCTP}, + {"pkts(tcp syn)", RNPGBE_PARSE_PKTS_TCP_SYN}, + {"pkts(fragment)", RNPGBE_PARSE_PKTS_FRAGMENT}, + {"pkts(1 vlan)", RNPGBE_PARSE_PKTS_1_VLAN}, + {"pkts(2 vlans)", RNPGBE_PARSE_PKTS_2_VLANS}, + {"pkts(ipv4)", RNPGBE_PARSE_PKTS_IPV4}, + {"pkts(ipv6)", RNPGBE_PARSE_PKTS_IPV6}, + {"pkts(ip hdr err)", RNPGBE_PARSE_PKTS_IP_HDR_ERR}, + {"pkts(ip pkt err)", 
RNPGBE_PARSE_PKTS_IP_PKT_ERR}, + {"pkts(l3 hdr chk err)", RNPGBE_PARSE_PKTS_L3_HDR_CHK_ERR}, + {"pkts(l4 hdr chk err)", RNPGBE_PARSE_PKTS_L4_HDR_CHK_ERR}, + {"pkts(sctp hdr chk err)", RNPGBE_PARSE_PKTS_SCTP_HDR_CHK_ERR}, + {"pkts(vlan err)", RNPGBE_PARSE_PKTS_VLAN_ERR}, + {"pkts(rdma)", RNPGBE_PARSE_PKTS_RDMA}, + {"pkts(arp auto response)", RNPGBE_PARSE_PKTS_ARP_AUTO_RESP}, + {"pkts(icmpv6)", RNPGBE_PARSE_PKTS_ICMPV6}, + {"pkts(ipv6 extend)", RNPGBE_PARSE_PKTS_IPV6_EXTEND}, + {"pkts(802.3)", RNPGBE_PARSE_PKTS_8023}, + {"pkts(except short)", RNPGBE_PARSE_PKTS_EXCEPT_SHORT}, + {"pkts(ptp)", RNPGBE_PARSE_PKTS_PTP}, + {"pkts(NS req)", RNPGBE_PARSE_PKTS_NS_REQ}, + {"pkts(NS_NA auto res)", RNPGBE_PARSE_PKTS_NS_NA_AUTO_RES} +}; + +static const struct rnpgbe_debug_reg rx_debug_reg_pip_decap[] = { + {"pkts-in(all)", 0x82d0}, + {"pkts-out(all)", 0x82d4}, + {"pkts-out(host)", 0x82d8}, + {"pkts-out(bmc)", 0x82dc}, + {"pkts-out(switch)", 0x82e0}, + {"pkts-out(bmc+host)", 0x82e4}, + {"pkts(drop invalid)", 0x82e8}, + {"pkts(drop filter)", 0x82ec}, + {"pkts(drop host Insufficient perf)", 0x82f0}, + {"pkts(drop bmc Insufficient perf)", 0x82f4}, + {"pkts(drop switch Insufficient perf)", 0x82f8}, + {"pkts(rm vlan)", 0x82fc} +}; + +static const struct rnpgbe_debug_reg_bits regs_debug[] = { + {0x3fff, 0x8448, + {"gat_fifo_progfull", "gat_info_fifo_progfull", + "parse_fifo_progfull", "parse_info_fifo_progfull", + "res", "cov_fifo_progfull", "host_fifo_progfull", + "host_info_fifo_progfull", "sw_fifo_progfull", + "sw_info_fifo_progfull", "bmc_fifo_progfull", + "bmc_info_fifo_progfull", "mac_rxdatafifo_progfull", + "mac_rxinfo_fifo_progfull", " ", " ", " ", " ", + " ", " ", " ", " ", " ", " ", " ", " ", + " ", " ", " ", " ", " ", " " + }, + }, + + {0x3c2f, 0x844c, + {"gat_fifo_full", "gat_info_fifo_full", + "parse_fifo_full", "parse_info_fifo_full", + " ", "cov_fifo_progfull", " ", + " ", " ", + " ", "bmc_fifo_full", + "bmc_info_fifo_full", "mac_rxdatafifo_full", + "mac_rxinfo_fifo_full", 
" ", " ", " ", " ", + " ", " ", " ", " ", " ", " ", " ", " ", + " ", " ", " ", " ", " ", " " + }, + } +}; + +static void rnpgbe_dump_rx_regs(struct rnpgbe_hw *hw) +{ + struct rnpgbe_eth_info *eth = &hw->eth; + struct device *dev = &hw->pdev->dev; + u32 value; + int i; + + /* rx trans */ + dev_info(dev, "\trx trans module:\n"); + for (i = 0; i < sizeof(rx_debug_reg_trans) / sizeof(struct rnpgbe_debug_reg); i++) { + value = eth_rd32(eth, rx_debug_reg_trans[i].offset); + dev_info(dev, "\t%s \t:0x%08x(%4u)", + rx_debug_reg_trans[i].name, + value, value); + } + + dev_info(dev, "\temac gather module:\n"); + for (i = 0; i < sizeof(rx_debug_reg_gather) / sizeof(struct rnpgbe_debug_reg); i++) { + value = eth_rd32(eth, rx_debug_reg_gather[i].offset); + dev_info(dev, "\t%s \t:0x%08x(%4u)", + rx_debug_reg_gather[i].name, + value, value); + } + + /* pip parse */ + dev_info(dev, "\tpip parse module:\n"); + for (i = 0; i < sizeof(rx_debug_reg_pip_parse) / sizeof(struct rnpgbe_debug_reg); i++) { + value = eth_rd32(eth, rx_debug_reg_pip_parse[i].offset); + dev_info(dev, "\t%s \t:0x%08x(%4u)", + rx_debug_reg_pip_parse[i].name, + value, value); + } + /* pip decap */ + dev_info(dev, "\tpip decap module:\n"); + for (i = 0; i < sizeof(rx_debug_reg_pip_decap) / sizeof(struct rnpgbe_debug_reg); i++) { + value = eth_rd32(eth, rx_debug_reg_pip_decap[i].offset); + dev_info(dev, "\t%s \t:0x%08x(%4u)", + rx_debug_reg_pip_decap[i].name, + value, value); + } + + dev_info(dev, "\tdebug reg:\n"); + for (i = 0; i < sizeof(regs_debug) / sizeof(struct rnpgbe_debug_reg_bits); i++) { + int j; + + value = eth_rd32(eth, regs_debug[i].offset); + /* for each bits */ + for (j = 0; j < 32; j++) { + /* only check flags set value */ + if ((value & regs_debug[i].flags) & (1 << j)) { + dev_info(dev, + "\t%s detected\n", regs_debug[i].name[j]); + } + } + } + dev_info(dev, "\tend\n"); +} + +static void rnpgbe_dump_gephy(struct rnpgbe_hw *hw) +{ + struct device *dev = &hw->pdev->dev; + u16 page = 0; + u32 reg = 0; 
+ u16 value; + + for (page = 0; page < 0x12; page++) { + /* write page */ + hw->ops.phy_write_reg(hw, 0x1f, 0, page); + + for (reg = 0; reg < 31; reg++) { + hw->ops.phy_read_reg(hw, reg, 0, &value); + dev_info(dev, "page %02d reg 0x%02x:0x%04x\n", page, reg, value); + } + } + hw->ops.phy_write_reg(hw, 0x1f, 0, 0); +} + +static int rnpgbe_dump_debug_regs_hw_ops(struct rnpgbe_hw *hw, + char *cmd) +{ + int ret = -1; + + if (!strncmp(cmd, "ring", 4)) { + rnpgbe_dump_rings_regs(hw); + ret = 0; + } else if (!strncmp(cmd, "tx", 2)) { + rnpgbe_dump_tx_regs(hw); + ret = 0; + } else if (!strncmp(cmd, "rx", 2)) { + rnpgbe_dump_rx_regs(hw); + ret = 0; + } else if (!strncmp(cmd, "gephy", 5)) { + rnpgbe_dump_gephy(hw); + ret = 0; + } + return ret; +} + +static void rnpgbe_set_lldp_hw_ops(struct rnpgbe_hw *hw, bool enable) { rnpgbe_mbx_lldp_set(hw, enable); } -static void rnpgbe_get_lldp_hw_ops_n500(struct rnpgbe_hw *hw) +static void rnpgbe_get_lldp_hw_ops(struct rnpgbe_hw *hw) { } -static void rnpgbe_set_eee_timer_hw_ops_n500(struct rnpgbe_hw *hw, - int ls, int tw) +static void rnpgbe_set_eee_timer_hw_ops(struct rnpgbe_hw *hw, + int ls, int tw) { struct rnpgbe_mac_info *mac = &hw->mac; mac->ops.set_eee_timer(mac, ls, tw); } -static void rnpgbe_set_vf_vlan_mode_hw_ops_n500(struct rnpgbe_hw *hw, - u16 vlan, int vf, - bool enable) +static void rnpgbe_set_vf_vlan_mode_hw_ops(struct rnpgbe_hw *hw, + u16 vlan, int vf, + bool enable) { struct rnpgbe_eth_info *eth = &hw->eth; struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back; @@ -2112,9 +2428,9 @@ static void rnpgbe_set_vf_vlan_mode_hw_ops_n500(struct rnpgbe_hw *hw, eth->ops.set_vf_vlan_mode(eth, vlan, vf, enable); } -static void rnpgbe_driver_status_hw_ops_n500(struct rnpgbe_hw *hw, - bool enable, - int mode) +static void rnpgbe_driver_status_hw_ops(struct rnpgbe_hw *hw, + bool enable, + int mode) { switch (mode) { case rnpgbe_driver_insmod: @@ -2123,23 +2439,23 @@ static void rnpgbe_driver_status_hw_ops_n500(struct 
rnpgbe_hw *hw, case rnpgbe_driver_suspuse: rnpgbe_mbx_ifsuspuse(hw, enable); break; - case rnpgbe_driver_force_control_mac: + case rnpgbe_driver_force_control_phy: rnpgbe_mbx_ifforce_control_mac(hw, enable); break; } } -static void rnpgbe_set_tuple5_hw_ops_n500(struct rnpgbe_hw *hw, - union rnpgbe_atr_input *input, - u16 pri_id, u8 queue, bool prio_flag) +static void rnpgbe_set_tuple5_hw_ops(struct rnpgbe_hw *hw, + union rnpgbe_atr_input *input, + u16 pri_id, u8 queue, bool prio_flag) { struct rnpgbe_eth_info *eth = &hw->eth; eth->ops.set_tuple5_remapping(eth, input, pri_id, queue, prio_flag); } -static void rnpgbe_clr_tuple5_hw_ops_n500(struct rnpgbe_hw *hw, u16 pri_id) +static void rnpgbe_clr_tuple5_hw_ops(struct rnpgbe_hw *hw, u16 pri_id) { struct rnpgbe_eth_info *eth = &hw->eth; @@ -2147,76 +2463,92 @@ static void rnpgbe_clr_tuple5_hw_ops_n500(struct rnpgbe_hw *hw, u16 pri_id) } static void -rnpgbe_update_hw_status_hw_ops_n500(struct rnpgbe_hw *hw, - struct rnpgbe_hw_stats *hw_stats, - struct net_device_stats *net_stats) +rnpgbe_update_hw_status_hw_ops(struct rnpgbe_hw *hw, + struct rnpgbe_hw_stats *hw_stats, + struct net_device_stats *net_stats) { struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back; struct rnpgbe_dma_info *dma = &hw->dma; struct rnpgbe_eth_info *eth = &hw->eth; struct rnpgbe_mac_info *mac = &hw->mac; + u64 mac_rx_broadcast = 0; + u64 mac_rx_multicast = 0; + u64 rx_over_errors = 0; int i; - net_stats->rx_errors += eth_rd32(eth, RNP500_RX_MAC_GFCS_ERR_NUM) + - eth_rd32(eth, RNP500_RX_MAC_LEN_ERR_NUM) + - eth_rd32(eth, RNP500_RX_MAC_SFCS_ERR_NUM) + - eth_rd32(eth, RNP500_RX_MAC_GLEN_ERR_NUM) + - eth_rd32(eth, RNP500_RX_MAC_SLEN_ERR_NUM); + net_stats->rx_length_errors = eth_rd32(eth, RNPGBE_RXTRANS_LEN_ERR_NUM) + + eth_rd32(eth, RNPGBE_RXTRANS_SLEN_ERR_NUM) + + eth_rd32(eth, RNPGBE_RXTRANS_GLEN_ERR_NUM); + + net_stats->rx_crc_errors = eth_rd32(eth, RNPGBE_RXTRANS_FCS_ERR_NUM); + net_stats->rx_frame_errors = eth_rd32(eth, 
RNPGBE_RXTRANS_LCS_ERR_NUM); + net_stats->rx_fifo_errors = eth_rd32(eth, RNPGBE_RXTRANS_DROP); + net_stats->rx_missed_errors = net_stats->rx_fifo_errors + + eth_rd32(eth, RNPGBE_RXTRANS_CUT_ERR_PKTS) + + eth_rd32(eth, RNPGBE_RXTRANS_EXCEPT_NUM); - net_stats->collisions = eth_rd32(eth, RNP500_RX_MAC_LCS_ERR_NUM); - net_stats->rx_over_errors = eth_rd32(eth, RNP500_RX_MAC_CUT_NUM); - net_stats->rx_crc_errors = eth_rd32(eth, RNP500_RX_MAC_GFCS_ERR_NUM); + net_stats->collisions = eth_rd32(eth, RNPGBE_RX_MAC_LCS_ERR_NUM); hw_stats->invalid_droped_packets = - eth_rd32(eth, RNP500_RX_DROP_PKT_NUM); - hw_stats->rx_capabity_lost = eth_rd32(eth, RNP500_RXTRANS_DROP) + - eth_rd32(eth, RNP500_RXTRANS_CUT_ERR_PKTS); + eth_rd32(eth, RNPGBE_RX_DROP_PKT_NUM); + hw_stats->rx_capabity_lost = eth_rd32(eth, RNPGBE_RXTRANS_DROP) + + eth_rd32(eth, RNPGBE_RXTRANS_CUT_ERR_PKTS); hw_stats->filter_dropped_packets = - eth_rd32(eth, RNP500_DECAP_PKT_DROP1_NUM); + eth_rd32(eth, RNPGBE_DECAP_PKT_DROP1_NUM); hw_stats->host_l2_match_drop = - eth_rd32(eth, RNP500_ETH_HOST_L2_DROP_PKTS); + eth_rd32(eth, RNPGBE_ETH_HOST_L2_DROP_PKTS); hw_stats->redir_input_match_drop = - eth_rd32(eth, RNP500_ETH_REDIR_INPUT_MATCH_DROP_PKTS); + eth_rd32(eth, RNPGBE_ETH_REDIR_INPUT_MATCH_DROP_PKTS); hw_stats->redir_etype_match_drop = - eth_rd32(eth, RNP500_ETH_ETYPE_DROP_PKTS); + eth_rd32(eth, RNPGBE_ETH_ETYPE_DROP_PKTS); hw_stats->redir_tcp_syn_match_drop = - eth_rd32(eth, RNP500_ETH_TCP_SYN_DROP_PKTS); + eth_rd32(eth, RNPGBE_ETH_TCP_SYN_DROP_PKTS); hw_stats->redir_tuple5_match_drop = - eth_rd32(eth, RNP500_ETH_REDIR_TUPLE5_DROP_PKTS); + eth_rd32(eth, RNPGBE_ETH_REDIR_TUPLE5_DROP_PKTS); - hw_stats->tx_multicast = eth_rd32(eth, RNP500_TX_MULTI_NUM); - hw_stats->tx_broadcast = eth_rd32(eth, RNP500_TX_BROADCAST_NUM); - - hw_stats->mac_rx_broadcast = 0; - hw_stats->mac_rx_multicast = 0; + hw_stats->tx_multicast = eth_rd32(eth, RNPGBE_TX_MULTI_NUM); + hw_stats->tx_broadcast = eth_rd32(eth, RNPGBE_TX_BROADCAST_NUM); for (i 
= 0; i < adapter->num_tx_queues; i++) { struct rnpgbe_ring *tx_ring = adapter->tx_ring[i]; int idx = tx_ring->rnpgbe_queue_idx; + rx_over_errors += dma_rd32(dma, RNPGBE_RX_TIMEOUT_DROP(idx)); /* we should use the true idex */ - hw_stats->mac_rx_multicast += - dma_rd32(dma, RNP500_VEB_VFMPRC(idx)); - hw_stats->mac_rx_broadcast += - dma_rd32(dma, RNP500_VEB_VFBPRC(idx)); - } - hw_stats->dma_rx_drop_cnt_0 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(0)); - hw_stats->dma_rx_drop_cnt_1 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(1)); - hw_stats->dma_rx_drop_cnt_2 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(2)); - hw_stats->dma_rx_drop_cnt_3 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(3)); - hw_stats->dma_rx_drop_cnt_4 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(4)); - hw_stats->dma_rx_drop_cnt_5 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(5)); - hw_stats->dma_rx_drop_cnt_6 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(6)); - hw_stats->dma_rx_drop_cnt_7 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(7)); + mac_rx_multicast += dma_rd32(dma, RNPGBE_VEB_VFMPRC(idx)); + mac_rx_broadcast += dma_rd32(dma, RNPGBE_VEB_VFBPRC(idx)); + } + + hw_stats->mac_rx_broadcast = mac_rx_broadcast; + hw_stats->mac_rx_multicast = mac_rx_multicast; + + net_stats->rx_over_errors = rx_over_errors; + net_stats->rx_errors = net_stats->rx_length_errors + + net_stats->rx_over_errors + + net_stats->rx_crc_errors + + net_stats->rx_frame_errors + + net_stats->rx_fifo_errors + + net_stats->rx_missed_errors; + + hw_stats->dma_rx_drop_cnt_0 = dma_rd32(dma, RNPGBE_RX_TIMEOUT_DROP(0)); + hw_stats->dma_rx_drop_cnt_1 = dma_rd32(dma, RNPGBE_RX_TIMEOUT_DROP(1)); + hw_stats->dma_rx_drop_cnt_2 = dma_rd32(dma, RNPGBE_RX_TIMEOUT_DROP(2)); + hw_stats->dma_rx_drop_cnt_3 = dma_rd32(dma, RNPGBE_RX_TIMEOUT_DROP(3)); + hw_stats->dma_rx_drop_cnt_4 = dma_rd32(dma, RNPGBE_RX_TIMEOUT_DROP(4)); + hw_stats->dma_rx_drop_cnt_5 = dma_rd32(dma, RNPGBE_RX_TIMEOUT_DROP(5)); + hw_stats->dma_rx_drop_cnt_6 = dma_rd32(dma, RNPGBE_RX_TIMEOUT_DROP(6)); + hw_stats->dma_rx_drop_cnt_7 = 
dma_rd32(dma, RNPGBE_RX_TIMEOUT_DROP(7)); net_stats->multicast = hw_stats->mac_rx_multicast; hw_stats->ultra_short_cnt += mac_rd32(mac, GMAC_MANAGEMENT_RX_UNDERSIZE); - hw_stats->jumbo_cnt += mac_rd32(mac, RNP500_MAC_GLEN_ERR_NUM); + hw_stats->jumbo_cnt += mac_rd32(mac, RNPGBE_MAC_GLEN_ERR_NUM); + hw_stats->tx_pause += mac_rd32(mac, GMAC_MANAGEMENT_TX_PAUSE); + hw_stats->rx_pause += mac_rd32(mac, GMAC_MANAGEMENT_RX_PAUSE); } -const struct rnpgbe_stats rnp500_gstrings_net_stats[] = { +static const struct rnpgbe_stats rnpgbe_gstrings_net_stats[] = { RNP_NETDEV_STAT(rx_packets), RNP_NETDEV_STAT(tx_packets), RNP_NETDEV_STAT(rx_bytes), @@ -2238,8 +2570,8 @@ const struct rnpgbe_stats rnp500_gstrings_net_stats[] = { RNP_NETDEV_STAT(tx_heartbeat_errors), }; -#define RNP500_GLOBAL_STATS_LEN ARRAY_SIZE(rnp500_gstrings_net_stats) -static struct rnpgbe_stats rnp500_hwstrings_stats[] = { +#define RNPGBE_GLOBAL_STATS_LEN ARRAY_SIZE(rnpgbe_gstrings_net_stats) +static struct rnpgbe_stats rnpgbe_hwstrings_stats[] = { RNP_HW_STAT("vlan_add_cnt", hw_stats.vlan_add_cnt), RNP_HW_STAT("vlan_strip_cnt", hw_stats.vlan_strip_cnt), /* === drop== */ @@ -2261,23 +2593,25 @@ static struct rnpgbe_stats rnp500_hwstrings_stats[] = { RNP_HW_STAT("rx_multicast_count", hw_stats.mac_rx_multicast), RNP_HW_STAT("ultra_short_packets", hw_stats.ultra_short_cnt), RNP_HW_STAT("jumbo_packets", hw_stats.jumbo_cnt), + RNP_HW_STAT("mac_rx_pause_count", hw_stats.rx_pause), + RNP_HW_STAT("mac_tx_pause_count", hw_stats.tx_pause), }; -#define RNP500_HWSTRINGS_STATS_LEN ARRAY_SIZE(rnp500_hwstrings_stats) +#define RNPGBE_HWSTRINGS_STATS_LEN ARRAY_SIZE(rnpgbe_hwstrings_stats) -#define RNP500_STATS_LEN \ - (RNP500_GLOBAL_STATS_LEN + RNP500_HWSTRINGS_STATS_LEN + \ +#define RNPGBE_STATS_LEN \ + (RNPGBE_GLOBAL_STATS_LEN + RNPGBE_HWSTRINGS_STATS_LEN + \ RNP_QUEUE_STATS_LEN) -static const char rnp500_gstrings_test[][ETH_GSTRING_LEN] = { +static const char rnpgbe_gstrings_test[][ETH_GSTRING_LEN] = { "Register test 
(offline)", "Eeprom test (offline)", "Interrupt test (offline)", "Loopback test (offline)", "Link test (on/offline)" }; -#define RNP500_TEST_LEN (sizeof(rnp500_gstrings_test) / ETH_GSTRING_LEN) +#define RNPGBE_TEST_LEN (sizeof(rnpgbe_gstrings_test) / ETH_GSTRING_LEN) -static int rnp500_get_link_ksettings(struct net_device *netdev, +static int rnpgbe_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct rnpgbe_adapter *adapter = netdev_priv(netdev); @@ -2311,7 +2645,7 @@ static int rnp500_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); - if (autoneg) { + if (autoneg && !hw->fake_autoneg) { if (advertised_link & RNP_LINK_SPEED_1GB_FULL) ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); @@ -2333,7 +2667,8 @@ static int rnp500_get_link_ksettings(struct net_device *netdev, } ethtool_link_ksettings_add_link_mode(cmd, supported, TP); - ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + if (!hw->fake_autoneg) + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); cmd->base.port = PORT_TP; cmd->base.phy_address = adapter->phy_addr; @@ -2346,12 +2681,12 @@ static int rnp500_get_link_ksettings(struct net_device *netdev, } else { if (supported_link & RNP_LINK_SPEED_1GB_FULL) { ethtool_link_ksettings_add_link_mode(cmd, supported, - 1000baseT_Full); + 1000baseKX_Full); } if (advertised_link & RNP_LINK_SPEED_1GB_FULL) ethtool_link_ksettings_add_link_mode(cmd, advertising, - 1000baseT_Full); + 1000baseKX_Full); ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); @@ -2360,13 +2695,16 @@ static int rnp500_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); - if (autoneg) { + if (autoneg && !hw->fake_autoneg) { ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); cmd->base.autoneg = AUTONEG_ENABLE; } else 
{ cmd->base.autoneg = AUTONEG_DISABLE; } + if (hw->fake_autoneg) + cmd->base.autoneg = AUTONEG_DISABLE; + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); @@ -2402,7 +2740,7 @@ static int rnp500_get_link_ksettings(struct net_device *netdev, return 0; } -static int rnp500_set_link_ksettings(struct net_device *netdev, +static int rnpgbe_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct rnpgbe_adapter *adapter = netdev_priv(netdev); @@ -2419,17 +2757,20 @@ static int rnp500_set_link_ksettings(struct net_device *netdev, /* only allow one speed at a time if no */ if (!cmd->base.autoneg) { - if (cmd->base.speed == SPEED_1000) - return -EINVAL; - /* maybe user set other speed than 100 or 10 */ - if (cmd->base.speed != SPEED_100 && + if (cmd->base.speed != SPEED_1000 && + cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) return -EINVAL; autoneg = 0; speed = cmd->base.speed; duplex = cmd->base.duplex; + if (cmd->base.speed == SPEED_1000) { + autoneg = 1; + hw->fake_autoneg = 1; + } } else { + hw->fake_autoneg = 1; autoneg = 1; } @@ -2468,6 +2809,10 @@ static int rnp500_set_link_ksettings(struct net_device *netdev, 10baseT_Half)) advertised |= RNP_LINK_SPEED_10_HALF; + /* if fake autoneg, adv 1G */ + if (hw->fake_autoneg) + advertised |= RNP_LINK_SPEED_1GB_FULL; + /* if autoneg on, adv can not set 0 */ if (!advertised && (autoneg)) return -EINVAL; @@ -2487,20 +2832,25 @@ static int rnp500_set_link_ksettings(struct net_device *netdev, } clear_bit(__RNP_IN_SFP_INIT, &adapter->state); } else { + /* if not sgmii, we not support close autoneg */ + if (!cmd->base.autoneg && !hw->is_sgmii) + return -EINVAL; if (cmd->base.duplex == DUPLEX_HALF) return -EINVAL; + if (cmd->base.speed != SPEED_1000) + return -EINVAL; } return err; } -static int rnp500_get_regs_len(struct net_device *netdev) +static int rnpgbe_get_regs_len(struct net_device *netdev) { 
-#define RNP500_REGS_LEN 1 - return RNP500_REGS_LEN * sizeof(u32); +#define RNPGBE_REGS_LEN 1 + return RNPGBE_REGS_LEN * sizeof(u32); } -static void rnp500_get_drvinfo(struct net_device *netdev, +static void rnpgbe_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct rnpgbe_adapter *adapter = netdev_priv(netdev); @@ -2511,27 +2861,25 @@ static void rnp500_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->version)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d.%d.%d 0x%08x", ((char *)&hw->fw_version)[3], - ((char *)&hw->fw_version)[2], ((char *)&hw->fw_version)[1], - ((char *)&hw->fw_version)[0], - hw->bd_uid | (hw->sfc_boot ? 0x80000000 : 0) | - (hw->pxe_en ? 0x40000000 : 0) | - (hw->ncsi_en ? 0x20000000 : 0)); + "%d.%d.%d.%d", ((unsigned char *)&hw->fw_version)[3], + ((unsigned char *)&hw->fw_version)[2], + ((unsigned char *)&hw->fw_version)[1], + ((unsigned char *)&hw->fw_version)[0]); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = RNP500_STATS_LEN; - drvinfo->testinfo_len = RNP500_TEST_LEN; - drvinfo->regdump_len = rnp500_get_regs_len(netdev); - drvinfo->n_priv_flags = RNP500_PRIV_FLAGS_STR_LEN; + drvinfo->n_stats = RNPGBE_STATS_LEN; + drvinfo->testinfo_len = RNPGBE_TEST_LEN; + drvinfo->regdump_len = rnpgbe_get_regs_len(netdev); + drvinfo->n_priv_flags = RNPGBE_PRIV_FLAGS_STR_LEN; } -static int rnp500_get_eeprom_len(struct net_device *netdev) +static int rnpgbe_get_eeprom_len(struct net_device *netdev) { return 0; } -static int rnp500_get_eeprom(struct net_device *netdev, +static int rnpgbe_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *bytes) { struct rnpgbe_adapter *adapter = netdev_priv(netdev); @@ -2565,13 +2913,13 @@ static int rnp500_get_eeprom(struct net_device *netdev, return 0; } -static int rnp500_set_eeprom(struct net_device *netdev, +static int rnpgbe_set_eeprom(struct net_device *netdev, struct ethtool_eeprom 
*eeprom, u8 *bytes) { return 0; } -static void rnp500_get_pauseparam(struct net_device *netdev, +static void rnpgbe_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct rnpgbe_adapter *adapter = netdev_priv(netdev); @@ -2595,7 +2943,7 @@ static void rnp500_get_pauseparam(struct net_device *netdev, } } -static int rnp500_set_pauseparam(struct net_device *netdev, +static int rnpgbe_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct rnpgbe_adapter *adapter = netdev_priv(netdev); @@ -2624,7 +2972,7 @@ static int rnp500_set_pauseparam(struct net_device *netdev, return 0; } -static void rnp500_get_regs(struct net_device *netdev, +static void rnpgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct rnpgbe_adapter *adapter = netdev_priv(netdev); @@ -2632,13 +2980,23 @@ static void rnp500_get_regs(struct net_device *netdev, u32 *regs_buff = p; int i; - memset(p, 0, RNP500_REGS_LEN * sizeof(u32)); + memset(p, 0, RNPGBE_REGS_LEN * sizeof(u32)); + + for (i = 0; i < RNPGBE_REGS_LEN; i++) + regs_buff[i] = hw_rd32(hw, i * 4); +} + +static int rnpgbe_nway_reset(struct net_device *netdev) +{ + struct rnpgbe_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + rnpgbe_reinit_locked(adapter); - for (i = 0; i < RNP500_REGS_LEN; i++) - regs_buff[i] = rd32(hw, i * 4); + return 0; } -static void rnp500_get_strings(struct net_device *netdev, u32 stringset, +static void rnpgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { char *p = (char *)data; @@ -2646,19 +3004,19 @@ static void rnp500_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_TEST: - for (i = 0; i < RNP500_TEST_LEN; i++) { - memcpy(data, rnp500_gstrings_test[i], ETH_GSTRING_LEN); + for (i = 0; i < RNPGBE_TEST_LEN; i++) { + memcpy(data, rnpgbe_gstrings_test[i], ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } break; case ETH_SS_STATS: - for (i = 0; i < 
RNP500_GLOBAL_STATS_LEN; i++) { - memcpy(p, rnp500_gstrings_net_stats[i].stat_string, + for (i = 0; i < RNPGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, rnpgbe_gstrings_net_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < RNP500_HWSTRINGS_STATS_LEN; i++) { - memcpy(p, rnp500_hwstrings_stats[i].stat_string, + for (i = 0; i < RNPGBE_HWSTRINGS_STATS_LEN; i++) { + memcpy(p, rnpgbe_hwstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } @@ -2762,79 +3120,79 @@ static void rnp500_get_strings(struct net_device *netdev, u32 stringset, p += ETH_GSTRING_LEN; sprintf(p, "queue%u_rx_clean_count", i); p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_resync", i); + p += ETH_GSTRING_LEN; } break; case ETH_SS_PRIV_FLAGS: - memcpy(data, rnp500_priv_flags_strings, - RNP500_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + memcpy(data, rnpgbe_priv_flags_strings, + RNPGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); break; } } -static int rnp500_get_sset_count(struct net_device *netdev, int sset) +static int rnpgbe_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_TEST: - return RNP500_TEST_LEN; + return RNPGBE_TEST_LEN; case ETH_SS_STATS: - return RNP500_STATS_LEN; + return RNPGBE_STATS_LEN; case ETH_SS_PRIV_FLAGS: - return RNP500_PRIV_FLAGS_STR_LEN; + return RNPGBE_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } } -static u32 rnp500_get_priv_flags(struct net_device *netdev) +static u32 rnpgbe_get_priv_flags(struct net_device *netdev) { struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)netdev_priv(netdev); u32 priv_flags = 0; - if (adapter->priv_flags & RNP_PRIV_FLAG_MAC_LOOPBACK) - priv_flags |= RNP500_MAC_LOOPBACK; if (adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG) - priv_flags |= RNP500_PADDING_DEBUG; + priv_flags |= RNPGBE_PADDING_DEBUG; if (adapter->priv_flags & RNP_PRIV_FLAG_SIMUATE_DOWN) - priv_flags |= RNP500_SIMULATE_DOWN; + priv_flags |= RNPGBE_SIMULATE_DOWN; if (adapter->priv_flags & 
RNP_PRIV_FLAG_ULTRA_SHORT) - priv_flags |= RNP500_ULTRA_SHORT; + priv_flags |= RNPGBE_ULTRA_SHORT; if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN) - priv_flags |= RNP500_DOUBLE_VLAN; + priv_flags |= RNPGBE_DOUBLE_VLAN; if (adapter->priv_flags & RNP_PRIV_FLAG_PAUSE_OWN) - priv_flags |= RNP500_PAUSE_OWN; + priv_flags |= RNPGBE_PAUSE_OWN; if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) - priv_flags |= RNP500_STAGS_ENABLE; + priv_flags |= RNPGBE_STAGS_ENABLE; if (adapter->priv_flags & RNP_PRIV_FLAG_JUMBO) - priv_flags |= RNP500_JUMBO_ENABLE; + priv_flags |= RNPGBE_JUMBO_ENABLE; if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) - priv_flags |= RNP500_TX_PADDING; + priv_flags |= RNPGBE_TX_PADDING; if (adapter->priv_flags & RNP_PRIV_FLAG_SOFT_TX_PADDING) - priv_flags |= RNP500_TX_SOLF_PADDING; + priv_flags |= RNPGBE_TX_SOLF_PADDING; if (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR) - priv_flags |= RNP500_REC_HDR_LEN_ERR; + priv_flags |= RNPGBE_REC_HDR_LEN_ERR; if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE) - priv_flags |= RNP500_DOUBLE_VLAN_RECEIVE; + priv_flags |= RNPGBE_DOUBLE_VLAN_RECEIVE; if (adapter->priv_flags & RNP_PRIV_FLAG_RX_SKIP_EN) - priv_flags |= RNP500_RX_SKIP_EN; + priv_flags |= RNPGBE_RX_SKIP_EN; if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO) - priv_flags |= RNP500_TCP_SYNC_PRIO; + priv_flags |= RNPGBE_TCP_SYNC_PRIO; if (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_PRIO) - priv_flags |= RNP500_REMAP_PRIO; + priv_flags |= RNPGBE_REMAP_PRIO; if (adapter->priv_flags & RNP_PRIV_FLAG_8023_PRIO) - priv_flags |= RNP500_8023_PRIO; + priv_flags |= RNPGBE_8023_PRIO; if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) - priv_flags |= RNP500_SRIOV_VLAN_MODE; + priv_flags |= RNPGBE_SRIOV_VLAN_MODE; if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP) - priv_flags |= RNP500_LLDP_EN; + priv_flags |= RNPGBE_LLDP_EN; if (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE) - priv_flags |= RNP500_FORCE_CLOSE; + priv_flags |= 
RNPGBE_FORCE_CLOSE; return priv_flags; } -static int rnp500_set_priv_flags(struct net_device *netdev, u32 priv_flags) +static int rnpgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)netdev_priv(netdev); @@ -2848,20 +3206,12 @@ static int rnp500_set_priv_flags(struct net_device *netdev, u32 priv_flags) data_old = dma_rd32(dma, RNP_DMA_CONFIG); data_new = data_old; - if (priv_flags & RNP500_MAC_LOOPBACK) { - SET_BIT(n500_mac_loopback, data_new); - adapter->priv_flags |= RNP_PRIV_FLAG_MAC_LOOPBACK; - } else if (adapter->priv_flags & RNP_PRIV_FLAG_MAC_LOOPBACK) { - adapter->priv_flags &= (~RNP_PRIV_FLAG_MAC_LOOPBACK); - CLR_BIT(n500_mac_loopback, data_new); - } - - if (priv_flags & RNP500_PADDING_DEBUG) + if (priv_flags & RNPGBE_PADDING_DEBUG) adapter->priv_flags |= RNP_PRIV_FLAG_PADDING_DEBUG; else if (adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG) adapter->priv_flags &= (~RNP_PRIV_FLAG_PADDING_DEBUG); - if (priv_flags & RNP500_SIMULATE_DOWN) { + if (priv_flags & RNPGBE_SIMULATE_DOWN) { adapter->priv_flags |= RNP_PRIV_FLAG_SIMUATE_DOWN; /* set check link again */ adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; @@ -2871,19 +3221,19 @@ static int rnp500_set_priv_flags(struct net_device *netdev, u32 priv_flags) adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; } - if (priv_flags & RNP500_ULTRA_SHORT) { + if (priv_flags & RNPGBE_ULTRA_SHORT) { int min = 33; adapter->priv_flags |= RNP_PRIV_FLAG_ULTRA_SHORT; - eth_wr32(eth, RNP500_ETH_DEFAULT_RX_MIN_LEN, min); + eth_wr32(eth, RNPGBE_ETH_DEFAULT_RX_MIN_LEN, min); } else { int min = 60; adapter->priv_flags &= (~RNP_PRIV_FLAG_ULTRA_SHORT); - eth_wr32(eth, RNP500_ETH_DEFAULT_RX_MIN_LEN, min); + eth_wr32(eth, RNPGBE_ETH_DEFAULT_RX_MIN_LEN, min); } - if (priv_flags & RNP500_PAUSE_OWN) { + if (priv_flags & RNPGBE_PAUSE_OWN) { u32 data; data = mac_rd32(mac, GMAC_FLOW_CTRL); @@ -2899,7 +3249,7 @@ static int rnp500_set_priv_flags(struct net_device *netdev, u32 
priv_flags) mac_wr32(mac, GMAC_FLOW_CTRL, data); } - if (priv_flags & RNP500_DOUBLE_VLAN) { + if (priv_flags & RNPGBE_DOUBLE_VLAN) { adapter->priv_flags |= RNP_PRIV_FLAG_DOUBLE_VLAN; eth->ops.set_double_vlan(eth, true); @@ -2908,15 +3258,15 @@ static int rnp500_set_priv_flags(struct net_device *netdev, u32 priv_flags) eth->ops.set_double_vlan(eth, false); } - if (priv_flags & RNP500_STAGS_ENABLE) { - eth_wr32(eth, RNP500_ETH_TX_VLAN_CONTROL_EANBLE, 1); + if (priv_flags & RNPGBE_STAGS_ENABLE) { + eth_wr32(eth, RNPGBE_ETH_TX_VLAN_CONTROL_EANBLE, 1); adapter->flags2 |= RNP_FLAG2_VLAN_STAGS_ENABLED; eth->ops.set_vfta(eth, adapter->stags_vid, true); } else { int true_remove = 1; int vid = adapter->stags_vid; - eth_wr32(eth, RNP500_ETH_TX_VLAN_CONTROL_EANBLE, 0); + eth_wr32(eth, RNPGBE_ETH_TX_VLAN_CONTROL_EANBLE, 0); adapter->flags2 &= (~RNP_FLAG2_VLAN_STAGS_ENABLED); if (vid) { if (test_bit(vid, adapter->active_vlans)) @@ -2928,7 +3278,7 @@ static int rnp500_set_priv_flags(struct net_device *netdev, u32 priv_flags) } } - if (priv_flags & RNP500_JUMBO_ENABLE) { + if (priv_flags & RNPGBE_JUMBO_ENABLE) { adapter->priv_flags |= RNP_PRIV_FLAG_JUMBO; hw->ops.set_mtu(hw, netdev->mtu); } else { @@ -2936,42 +3286,41 @@ static int rnp500_set_priv_flags(struct net_device *netdev, u32 priv_flags) hw->ops.set_mtu(hw, netdev->mtu); } - if (priv_flags & RNP500_TX_PADDING) + if (priv_flags & RNPGBE_TX_PADDING) adapter->priv_flags |= RNP_PRIV_FLAG_TX_PADDING; else adapter->priv_flags &= (~RNP_PRIV_FLAG_TX_PADDING); - if (priv_flags & RNP500_TX_SOLF_PADDING) + if (priv_flags & RNPGBE_TX_SOLF_PADDING) adapter->priv_flags |= RNP_PRIV_FLAG_SOFT_TX_PADDING; else adapter->priv_flags &= (~RNP_PRIV_FLAG_SOFT_TX_PADDING); - if (priv_flags & RNP500_REC_HDR_LEN_ERR) { + if (priv_flags & RNPGBE_REC_HDR_LEN_ERR) { adapter->priv_flags |= RNP_PRIV_FLAG_REC_HDR_LEN_ERR; - eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR, - PKT_LEN_ERR | HDR_LEN_ERR); - + eth_wr32(eth, RNPGBE_ETH_ERR_MASK_VECTOR, 0); } else if 
(adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR) { adapter->priv_flags &= (~RNP_PRIV_FLAG_REC_HDR_LEN_ERR); - eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR, 0); + eth_wr32(eth, RNPGBE_ETH_ERR_MASK_VECTOR, + PKT_LEN_ERR | HDR_LEN_ERR); } - if (priv_flags & RNP500_DOUBLE_VLAN_RECEIVE) { + if (priv_flags & RNPGBE_DOUBLE_VLAN_RECEIVE) { adapter->priv_flags |= RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE; if (!(adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL)) - eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 0); + eth_wr32(eth, RNPGBE_ETH_DOUBLE_VLAN_DROP, 0); } else { adapter->priv_flags &= (~RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE); if (!(adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL)) - eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 1); + eth_wr32(eth, RNPGBE_ETH_DOUBLE_VLAN_DROP, 1); } - if (priv_flags & RNP500_TCP_SYNC_PRIO) + if (priv_flags & RNPGBE_TCP_SYNC_PRIO) adapter->priv_flags |= RNP_PRIV_FLAG_TCP_SYNC_PRIO; else adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC_PRIO); - if (priv_flags & RNP500_SRIOV_VLAN_MODE) { + if (priv_flags & RNPGBE_SRIOV_VLAN_MODE) { int i; adapter->priv_flags |= RNP_PRIV_FLAG_SRIOV_VLAN_MODE; @@ -3002,7 +3351,7 @@ static int rnp500_set_priv_flags(struct net_device *netdev, u32 priv_flags) } } - if (priv_flags & RNP500_LLDP_EN) { + if (priv_flags & RNPGBE_LLDP_EN) { hw->ops.set_lldp(hw, true); adapter->priv_flags |= RNP_PRIV_FLAG_LLDP; } else if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP) { @@ -3010,32 +3359,44 @@ static int rnp500_set_priv_flags(struct net_device *netdev, u32 priv_flags) hw->ops.set_lldp(hw, false); } - if (priv_flags & RNP500_FORCE_CLOSE) - adapter->priv_flags |= RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE; - else - adapter->priv_flags &= (~RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE); + /* if force close */ + if (hw->force_cap) { + if (priv_flags & RNPGBE_FORCE_CLOSE) { + if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { + adapter->priv_flags |= RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE; + hw->ops.driver_status(hw, true, + rnpgbe_driver_force_control_phy); + } + } 
else if (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE); + hw->ops.driver_status(hw, false, + rnpgbe_driver_force_control_phy); + } + } else if (priv_flags & RNPGBE_FORCE_CLOSE) { + rnpgbe_err("firmware not support set this feature.\n"); + } skip_setup_vf_vlan_n500: - if (priv_flags & RNP500_8023_PRIO) { + if (priv_flags & RNPGBE_8023_PRIO) { adapter->priv_flags |= RNP_PRIV_FLAG_8023_PRIO; - eth_wr32(eth, RNP500_PRIORITY_EN_8023, 1); + eth_wr32(eth, RNPGBE_PRIORITY_EN_8023, 1); } else { adapter->priv_flags &= (~RNP_PRIV_FLAG_8023_PRIO); - eth_wr32(eth, RNP500_PRIORITY_EN_8023, 0); + eth_wr32(eth, RNPGBE_PRIORITY_EN_8023, 0); } - if (priv_flags & RNP500_REMAP_PRIO) + if (priv_flags & RNPGBE_REMAP_PRIO) adapter->priv_flags |= RNP_PRIV_FLAG_REMAP_PRIO; else adapter->priv_flags &= (~RNP_PRIV_FLAG_REMAP_PRIO); - if (priv_flags & (RNP500_8023_PRIO | RNP500_REMAP_PRIO)) { - eth_wr32(eth, RNP500_PRIORITY_1_MARK, RNP500_PRIORITY_1); - eth_wr32(eth, RNP500_PRIORITY_0_MARK, RNP500_PRIORITY_0); - eth_wr32(eth, RNP500_PRIORITY_EN, 1); + if (priv_flags & (RNPGBE_8023_PRIO | RNPGBE_REMAP_PRIO)) { + eth_wr32(eth, RNPGBE_PRIORITY_1_MARK, RNPGBE_PRIORITY_1); + eth_wr32(eth, RNPGBE_PRIORITY_0_MARK, RNPGBE_PRIORITY_0); + eth_wr32(eth, RNPGBE_PRIORITY_EN, 1); } else { - eth_wr32(eth, RNP500_PRIORITY_EN, 0); + eth_wr32(eth, RNPGBE_PRIORITY_EN, 0); } if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC) { @@ -3059,7 +3420,7 @@ static int rnp500_set_priv_flags(struct net_device *netdev, u32 priv_flags) return 0; } -static void rnp500_get_ethtool_stats(struct net_device *netdev, +static void rnpgbe_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct rnpgbe_adapter *adapter = netdev_priv(netdev); @@ -3070,18 +3431,18 @@ static void rnp500_get_ethtool_stats(struct net_device *netdev, rnpgbe_update_stats(adapter); - for (i = 0; i < RNP500_GLOBAL_STATS_LEN; i++) { + for (i = 0; i < 
RNPGBE_GLOBAL_STATS_LEN; i++) { p = (char *)net_stats + - rnp500_gstrings_net_stats[i].stat_offset; - data[i] = (rnp500_gstrings_net_stats[i].sizeof_stat == + rnpgbe_gstrings_net_stats[i].stat_offset; + data[i] = (rnpgbe_gstrings_net_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (j = 0; j < RNP500_HWSTRINGS_STATS_LEN; j++, i++) { - p = (char *)adapter + rnp500_hwstrings_stats[j].stat_offset; + for (j = 0; j < RNPGBE_HWSTRINGS_STATS_LEN; j++, i++) { + p = (char *)adapter + rnpgbe_hwstrings_stats[j].stat_offset; data[i] = - (rnp500_hwstrings_stats[j].sizeof_stat == sizeof(u64)) ? + (rnpgbe_hwstrings_stats[j].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } @@ -3226,35 +3587,37 @@ static void rnp500_get_ethtool_stats(struct net_device *netdev, data[i++] = ring->rx_stats.rx_equal_count; data[i++] = ring->rx_stats.rx_clean_times; data[i++] = ring->rx_stats.rx_clean_count; + data[i++] = ring->rx_stats.rx_resync; } } /* n500 ethtool_ops ops here */ -static const struct ethtool_ops rnp500_ethtool_ops = { - .get_link_ksettings = rnp500_get_link_ksettings, - .set_link_ksettings = rnp500_set_link_ksettings, - .get_drvinfo = rnp500_get_drvinfo, - .get_regs_len = rnp500_get_regs_len, - .get_regs = rnp500_get_regs, +static const struct ethtool_ops rnpgbe_ethtool_ops = { + .get_link_ksettings = rnpgbe_get_link_ksettings, + .set_link_ksettings = rnpgbe_set_link_ksettings, + .get_drvinfo = rnpgbe_get_drvinfo, + .get_regs_len = rnpgbe_get_regs_len, + .get_regs = rnpgbe_get_regs, .get_wol = rnpgbe_get_wol, .set_wol = rnpgbe_set_wol, + .nway_reset = rnpgbe_nway_reset, .get_link = ethtool_op_get_link, - .get_eeprom_len = rnp500_get_eeprom_len, - .get_eeprom = rnp500_get_eeprom, - .set_eeprom = rnp500_set_eeprom, + .get_eeprom_len = rnpgbe_get_eeprom_len, + .get_eeprom = rnpgbe_get_eeprom, + .set_eeprom = rnpgbe_set_eeprom, .get_ringparam = rnpgbe_get_ringparam, .set_ringparam = rnpgbe_set_ringparam, - .get_pauseparam = rnp500_get_pauseparam, - 
.set_pauseparam = rnp500_set_pauseparam, + .get_pauseparam = rnpgbe_get_pauseparam, + .set_pauseparam = rnpgbe_set_pauseparam, .get_msglevel = rnpgbe_get_msglevel, .set_msglevel = rnpgbe_set_msglevel, .self_test = rnpgbe_diag_test, - .get_strings = rnp500_get_strings, + .get_strings = rnpgbe_get_strings, .set_phys_id = rnpgbe_set_phys_id, - .get_sset_count = rnp500_get_sset_count, - .get_priv_flags = rnp500_get_priv_flags, - .set_priv_flags = rnp500_set_priv_flags, - .get_ethtool_stats = rnp500_get_ethtool_stats, + .get_sset_count = rnpgbe_get_sset_count, + .get_priv_flags = rnpgbe_get_priv_flags, + .set_priv_flags = rnpgbe_set_priv_flags, + .get_ethtool_stats = rnpgbe_get_ethtool_stats, .get_coalesce = rnpgbe_get_coalesce, .set_coalesce = rnpgbe_set_coalesce, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | @@ -3278,77 +3641,81 @@ static const struct ethtool_ops rnp500_ethtool_ops = { .flash_device = rnpgbe_flash_device, }; -static void rnpgbe_set_ethtool_hw_ops_n500(struct net_device *netdev) -{ - netdev->ethtool_ops = &rnp500_ethtool_ops; -} - -static struct rnpgbe_hw_operations hw_ops_n500 = { - .init_hw = &rnpgbe_init_hw_ops_n500, - .reset_hw = &rnpgbe_reset_hw_ops_n500, - .start_hw = &rnpgbe_start_hw_ops_n500, - .set_mtu = &rnpgbe_set_mtu_hw_ops_n500, - .set_vlan_filter_en = &rnpgbe_set_vlan_filter_en_hw_ops_n500, - .set_vlan_filter = &rnpgbe_set_vlan_filter_hw_ops_n500, - .set_veb_vlan_mask = &rnpgbe_set_veb_vlan_mask_hw_ops_n500, - .set_vf_vlan_filter = &rnpgbe_set_vf_vlan_filter_hw_ops_n500, - .clr_vfta = &rnpgbe_clr_vfta_hw_ops_n500, - .set_vlan_strip = &rnpgbe_set_vlan_strip_hw_ops_n500, - .set_mac = &rnpgbe_set_mac_hw_ops_n500, - .set_rx_mode = &rnpgbe_set_rx_mode_hw_ops_n500, - .set_rar_with_vf = &rnpgbe_set_rar_with_vf_hw_ops_n500, - .clr_rar = &rnpgbe_clr_rar_hw_ops_n500, - .clr_rar_all = &rnpgbe_clr_rar_all_hw_ops_n500, - .clr_vlan_veb = &rnpgbe_clr_vlan_veb_hw_ops_n500, - .set_txvlan_mode = &rnpgbe_set_txvlan_mode_hw_ops_n500, - .set_fcs_mode 
= &rnpgbe_set_fcs_mode_hw_ops_n500, - .set_mac_rx = &rnpgbe_set_mac_rx_hw_ops_n500, - .update_sriov_info = &rnpgbe_update_sriov_info_hw_ops_n500, - .set_sriov_status = &rnpgbe_set_sriov_status_hw_ops_n500, - .set_sriov_vf_mc = &rnpgbe_set_sriov_vf_mc_hw_ops_n500, - .set_pause_mode = &rnpgbe_set_pause_mode_hw_ops_n500, - .get_pause_mode = &rnpgbe_get_pause_mode_hw_ops_n500, - .update_hw_info = &rnpgbe_update_hw_info_hw_ops_n500, - .set_rx_hash = &rnpgbe_set_rx_hash_hw_ops_n500, - .set_rss_hfunc = &rnpgbe_set_rss_hfunc_hw_ops_n500, - .set_rss_key = &rnpgbe_set_rss_key_hw_ops_n500, - .set_rss_table = &rnpgbe_set_rss_table_hw_ops_n500, - .set_mbx_link_event = &rnpgbe_set_mbx_link_event_hw_ops_n500, - .set_mbx_ifup = &rnpgbe_set_mbx_ifup_hw_ops_n500, - .check_link = &rnpgbe_check_mac_link_hw_ops_n500, - .setup_link = &rnpgbe_setup_mac_link_hw_ops_n500, - .clean_link = &rnpgbe_clean_link_hw_ops_n500, - .init_rx_addrs = &rnpgbe_init_rx_addrs_hw_ops_n500, - .set_layer2_remapping = &rnpgbe_set_layer2_hw_ops_n500, - .clr_layer2_remapping = &rnpgbe_clr_layer2_hw_ops_n500, - .clr_all_layer2_remapping = &rnpgbe_clr_all_layer2_hw_ops_n500, - .set_tuple5_remapping = &rnpgbe_set_tuple5_hw_ops_n500, - .clr_tuple5_remapping = &rnpgbe_clr_tuple5_hw_ops_n500, - .clr_all_tuple5_remapping = &rnpgbe_clr_all_tuple5_hw_ops_n500, - .set_tcp_sync_remapping = &rnpgbe_set_tcp_sync_hw_ops_n500, - .set_rx_skip = &rnpgbe_set_rx_skip_hw_ops_n500, - .set_outer_vlan_type = &rnpgbe_set_outer_vlan_type_hw_ops_n500, - .update_hw_status = &rnpgbe_update_hw_status_hw_ops_n500, - .update_rx_drop = &rnpgbe_update_hw_rx_drop_hw_ops_n500, - .setup_ethtool = &rnpgbe_set_ethtool_hw_ops_n500, - .phy_read_reg = &rnpgbe_phy_read_reg_hw_ops_n500, - .phy_write_reg = &rnpgbe_phy_write_reg_hw_ops_n500, - .setup_wol = &rnpgbe_setup_wol_hw_ops_n500, - .set_vf_vlan_mode = &rnpgbe_set_vf_vlan_mode_hw_ops_n500, - .driver_status = &rnpgbe_driver_status_hw_ops_n500, - .setup_eee = &rnpgbe_setup_eee_hw_ops_n500, - 
.set_eee_mode = &rnpgbe_set_eee_mode_hw_ops_n500, - .reset_eee_mode = &rnpgbe_reset_eee_mode_hw_ops_n500, - .set_eee_timer = &rnpgbe_set_eee_timer_hw_ops_n500, - .set_eee_pls = &rnpgbe_set_eee_pls_hw_ops_n500, - .get_lpi_status = &rnpgbe_get_lpi_status_hw_ops_n500, - .get_ncsi_mac = &rnpgbe_get_ncsi_mac_hw_ops_n500, - .get_ncsi_vlan = &rnpgbe_get_ncsi_vlan_hw_ops_n500, - .set_lldp = &rnpgbe_set_lldp_hw_ops_n500, - .get_lldp = &rnpgbe_get_lldp_hw_ops_n500, +static void rnpgbe_set_ethtool_hw_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &rnpgbe_ethtool_ops; +} + +static struct rnpgbe_hw_operations hw_ops_rnpgbe = { + .init_hw = &rnpgbe_init_hw_ops, + .reset_hw = &rnpgbe_reset_hw_ops, + .start_hw = &rnpgbe_start_hw_ops, + .set_mtu = &rnpgbe_set_mtu_hw_ops, + .set_vlan_filter_en = &rnpgbe_set_vlan_filter_en_hw_ops, + .set_vlan_filter = &rnpgbe_set_vlan_filter_hw_ops, + .set_veb_vlan_mask = &rnpgbe_set_veb_vlan_mask_hw_ops, + .set_vf_vlan_filter = &rnpgbe_set_vf_vlan_filter_hw_ops, + .clr_vfta = &rnpgbe_clr_vfta_hw_ops, + .set_vlan_strip = &rnpgbe_set_vlan_strip_hw_ops, + .set_mac = &rnpgbe_set_mac_hw_ops, + .set_rx_mode = &rnpgbe_set_rx_mode_hw_ops, + .set_rar_with_vf = &rnpgbe_set_rar_with_vf_hw_ops, + .clr_rar = &rnpgbe_clr_rar_hw_ops, + .clr_rar_all = &rnpgbe_clr_rar_all_hw_ops, + .clr_vlan_veb = &rnpgbe_clr_vlan_veb_hw_ops, + .set_txvlan_mode = &rnpgbe_set_txvlan_mode_hw_ops, + .set_fcs_mode = &rnpgbe_set_fcs_mode_hw_ops, + .set_mac_rx = &rnpgbe_set_mac_rx_hw_ops, + .update_sriov_info = &rnpgbe_update_sriov_info_hw_ops, + .set_sriov_status = &rnpgbe_set_sriov_status_hw_ops, + .set_sriov_vf_mc = &rnpgbe_set_sriov_vf_mc_hw_ops, + .set_pause_mode = &rnpgbe_set_pause_mode_hw_ops, + .get_pause_mode = &rnpgbe_get_pause_mode_hw_ops, + .update_hw_info = &rnpgbe_update_hw_info_hw_ops, + .set_rx_hash = &rnpgbe_set_rx_hash_hw_ops, + .set_rss_hfunc = &rnpgbe_set_rss_hfunc_hw_ops, + .set_rss_key = &rnpgbe_set_rss_key_hw_ops, + .set_rss_table = 
&rnpgbe_set_rss_table_hw_ops, + .set_mbx_link_event = &rnpgbe_set_mbx_link_event_hw_ops, + .set_mbx_ifup = &rnpgbe_set_mbx_ifup_hw_ops, + .check_link = &rnpgbe_check_mac_link_hw_ops, + .setup_link = &rnpgbe_setup_mac_link_hw_ops, + .clean_link = &rnpgbe_clean_link_hw_ops, + .init_rx_addrs = &rnpgbe_init_rx_addrs_hw_ops, + .set_layer2_remapping = &rnpgbe_set_layer2_hw_ops, + .clr_layer2_remapping = &rnpgbe_clr_layer2_hw_ops, + .clr_all_layer2_remapping = &rnpgbe_clr_all_layer2_hw_ops, + .set_tuple5_remapping = &rnpgbe_set_tuple5_hw_ops, + .clr_tuple5_remapping = &rnpgbe_clr_tuple5_hw_ops, + .clr_all_tuple5_remapping = &rnpgbe_clr_all_tuple5_hw_ops, + .set_tcp_sync_remapping = &rnpgbe_set_tcp_sync_hw_ops, + .set_rx_skip = &rnpgbe_set_rx_skip_hw_ops, + .set_outer_vlan_type = &rnpgbe_set_outer_vlan_type_hw_ops, + .update_hw_status = &rnpgbe_update_hw_status_hw_ops, + .update_rx_drop = &rnpgbe_update_hw_rx_drop_hw_ops, + .setup_ethtool = &rnpgbe_set_ethtool_hw_ops, + .get_thermal_sensor_data = &rnpgbe_get_thermal_sensor_data_hw_ops, + .init_thermal_sensor_thresh = + &rnpgbe_init_thermal_sensor_thresh_hw_ops, + .phy_read_reg = &rnpgbe_phy_read_reg_hw_ops, + .phy_write_reg = &rnpgbe_phy_write_reg_hw_ops, + .setup_wol = &rnpgbe_setup_wol_hw_ops, + .set_vf_vlan_mode = &rnpgbe_set_vf_vlan_mode_hw_ops, + .driver_status = &rnpgbe_driver_status_hw_ops, + .setup_eee = &rnpgbe_setup_eee_hw_ops, + .set_eee_mode = &rnpgbe_set_eee_mode_hw_ops, + .reset_eee_mode = &rnpgbe_reset_eee_mode_hw_ops, + .set_eee_timer = &rnpgbe_set_eee_timer_hw_ops, + .set_eee_pls = &rnpgbe_set_eee_pls_hw_ops, + .get_lpi_status = &rnpgbe_get_lpi_status_hw_ops, + .get_ncsi_mac = &rnpgbe_get_ncsi_mac_hw_ops, + .get_ncsi_vlan = &rnpgbe_get_ncsi_vlan_hw_ops, + .set_lldp = &rnpgbe_set_lldp_hw_ops, + .get_lldp = &rnpgbe_get_lldp_hw_ops, + .dump_debug_regs = &rnpgbe_dump_debug_regs_hw_ops, }; -static void rnpgbe_mac_set_rx_n500(struct rnpgbe_mac_info *mac, bool status) +static void rnpgbe_mac_set_rx(struct 
rnpgbe_mac_info *mac, bool status) { u32 value = mac_rd32(mac, GMAC_CONTROL); @@ -3363,52 +3730,31 @@ static void rnpgbe_mac_set_rx_n500(struct rnpgbe_mac_info *mac, bool status) mac_wr32(mac, GMAC_FRAME_FILTER, value | 1); } -static void rnpgbe_mac_set_speed_n500(struct rnpgbe_mac_info *mac, bool link, - u32 speed, bool duplex) +static void rnpgbe_mac_set_speed(struct rnpgbe_mac_info *mac, bool link, + u32 speed, bool duplex) { -#define SPEED_MASK (RNP_DM_MASK | RNP_FES_MASK | RNP_PS_MASK | RNP_LUD_MASK) - u32 value = mac_rd32(mac, GMAC_CONTROL); - - value &= (~SPEED_MASK); - - if (link) - value |= RNP_LUD_MASK; - - if (duplex) - value |= RNP_DM_MASK; - switch (speed) { - case RNP_LINK_SPEED_100_FULL: - value |= RNP_PS_MASK; - value |= RNP_FES_MASK; - break; - case RNP_LINK_SPEED_10_FULL: - value |= RNP_PS_MASK; - break; - } - - mac_wr32(mac, GMAC_CONTROL, value); } -static void rnpgbe_mac_fcs_n500(struct rnpgbe_mac_info *mac, bool status) +static void rnpgbe_mac_fcs(struct rnpgbe_mac_info *mac, bool status) { -#define RNP500_CST_MASK BIT(25) +#define RNPGBE_CST_MASK BIT(25) u32 value = mac_rd32(mac, GMAC_CONTROL); if (status) - value &= (~RNP500_CST_MASK); + value &= (~RNPGBE_CST_MASK); else - value |= (RNP500_CST_MASK); + value |= (RNPGBE_CST_MASK); mac_wr32(mac, GMAC_CONTROL, value); } /** - * rnpgbe_mac_fc_mode_n500 - Enable flow control + * rnpgbe_mac_fc_mode - Enable flow control * @mac: pointer to hardware structure * * Enable flow control according to the current settings. 
**/ -static s32 rnpgbe_mac_fc_mode_n500(struct rnpgbe_mac_info *mac) +static s32 rnpgbe_mac_fc_mode(struct rnpgbe_mac_info *mac) { struct rnpgbe_hw *hw = (struct rnpgbe_hw *)mac->back; s32 ret_val = 0; @@ -3512,10 +3858,10 @@ static int rnpgbe_mdio_read(struct rnpgbe_mac_info *mac, int phyreg) return data; } -static void rnpgbe_mac_check_link_n500(struct rnpgbe_mac_info *mac, - rnpgbe_link_speed *speed, - bool *link_up, - bool link_up_wait_to_complete) +static void rnpgbe_mac_check_link(struct rnpgbe_mac_info *mac, + rnpgbe_link_speed *speed, + bool *link_up, + bool link_up_wait_to_complete) { struct rnpgbe_hw *hw = (struct rnpgbe_hw *)mac->back; u32 data; @@ -3559,19 +3905,19 @@ static void rnpgbe_mac_check_link_n500(struct rnpgbe_mac_info *mac, } } -static void rnpgbe_mac_set_mac_n500(struct rnpgbe_mac_info *mac, u8 *addr, int index) +static void rnpgbe_mac_set_mac(struct rnpgbe_mac_info *mac, u8 *addr, int index) { u32 rar_low, rar_high = 0; rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); rar_high = RNP_RAH_AV | ((u32)addr[4] | (u32)addr[5] << 8); - mac_wr32(mac, RNP500_MAC_UNICAST_HIGH(index), rar_high); - mac_wr32(mac, RNP500_MAC_UNICAST_LOW(index), rar_low); + mac_wr32(mac, RNPGBE_MAC_UNICAST_HIGH(index), rar_high); + mac_wr32(mac, RNPGBE_MAC_UNICAST_LOW(index), rar_low); } -static int rnpgbe_mac_mdio_read_n500(struct rnpgbe_mac_info *mac, u32 phyreg, - u32 *regvalue) +static int rnpgbe_mac_mdio_read(struct rnpgbe_mac_info *mac, u32 phyreg, + u32 *regvalue) { unsigned int mii_address = mac->mii.addr; unsigned int mii_data = mac->mii.data; @@ -3600,8 +3946,8 @@ static int rnpgbe_mac_mdio_read_n500(struct rnpgbe_mac_info *mac, u32 phyreg, return data; } -static int rnpgbe_mac_mdio_write_n500(struct rnpgbe_mac_info *mac, int phyreg, - int phydata) +static int rnpgbe_mac_mdio_write(struct rnpgbe_mac_info *mac, int phyreg, + int phydata) { unsigned int mii_address = mac->mii.addr; unsigned int mii_data = mac->mii.data; 
@@ -3627,7 +3973,7 @@ static int rnpgbe_mac_mdio_write_n500(struct rnpgbe_mac_info *mac, int phyreg, return poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100); } -static void rnpgbe_mac_pmt_n500(struct rnpgbe_mac_info *mac, u32 mode, bool ncsi_en) +static void rnpgbe_mac_pmt(struct rnpgbe_mac_info *mac, u32 mode, bool ncsi_en) { unsigned int pmt = 0; @@ -3646,8 +3992,8 @@ static void rnpgbe_mac_pmt_n500(struct rnpgbe_mac_info *mac, u32 mode, bool ncsi mac_wr32(mac, GMAC_PMT, pmt); } -static void rnpgbe_mac_set_eee_mode_n500(struct rnpgbe_mac_info *mac, - bool en_tx_lpi_clockgating) +static void rnpgbe_mac_set_eee_mode(struct rnpgbe_mac_info *mac, + bool en_tx_lpi_clockgating) { u32 value = 0; @@ -3662,7 +4008,7 @@ static void rnpgbe_mac_set_eee_mode_n500(struct rnpgbe_mac_info *mac, mac_wr32(mac, GMAC_LPI_CTRL_STATUS, value); } -static void rnpgbe_mac_reset_eee_mode_n500(struct rnpgbe_mac_info *mac) +static void rnpgbe_mac_reset_eee_mode(struct rnpgbe_mac_info *mac) { u32 value = 0; @@ -3671,7 +4017,7 @@ static void rnpgbe_mac_reset_eee_mode_n500(struct rnpgbe_mac_info *mac) mac_wr32(mac, GMAC_LPI_CTRL_STATUS, value); } -static void rnpgbe_mac_set_eee_timer_n500(struct rnpgbe_mac_info *mac, int ls, int tw) +static void rnpgbe_mac_set_eee_timer(struct rnpgbe_mac_info *mac, int ls, int tw) { int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16); @@ -3686,7 +4032,7 @@ static void rnpgbe_mac_set_eee_timer_n500(struct rnpgbe_mac_info *mac, int ls, i mac_wr32(mac, GMAC_LPI_TIMER_CTRL, value); } -static void rnpgbe_mac_set_eee_pls_n500(struct rnpgbe_mac_info *mac, int link) +static void rnpgbe_mac_set_eee_pls(struct rnpgbe_mac_info *mac, int link) { u32 value = 0; @@ -3700,7 +4046,7 @@ static void rnpgbe_mac_set_eee_pls_n500(struct rnpgbe_mac_info *mac, int link) mac_wr32(mac, GMAC_LPI_CTRL_STATUS, value); } -static u32 rnpgbe_mac_get_lpi_status_n500(struct rnpgbe_mac_info *mac) +static u32 rnpgbe_mac_get_lpi_status(struct rnpgbe_mac_info *mac) { if (mac_rd32(mac, 
GMAC_INT_STATUS) & GMAC_INT_STATUS_LPIIS) return mac_rd32(mac, GMAC_LPI_CTRL_STATUS); @@ -3708,21 +4054,21 @@ static u32 rnpgbe_mac_get_lpi_status_n500(struct rnpgbe_mac_info *mac) return 0; } -static struct rnpgbe_mac_operations mac_ops_n500 = { - .set_mac_rx = &rnpgbe_mac_set_rx_n500, - .set_mac_speed = &rnpgbe_mac_set_speed_n500, - .set_mac_fcs = &rnpgbe_mac_fcs_n500, - .set_fc_mode = &rnpgbe_mac_fc_mode_n500, - .check_link = &rnpgbe_mac_check_link_n500, - .set_mac = &rnpgbe_mac_set_mac_n500, - .mdio_write = &rnpgbe_mac_mdio_write_n500, - .mdio_read = &rnpgbe_mac_mdio_read_n500, - .pmt = &rnpgbe_mac_pmt_n500, - .set_eee_mode = rnpgbe_mac_set_eee_mode_n500, - .reset_eee_mode = rnpgbe_mac_reset_eee_mode_n500, - .set_eee_timer = rnpgbe_mac_set_eee_timer_n500, - .set_eee_pls = rnpgbe_mac_set_eee_pls_n500, - .get_lpi_status = rnpgbe_mac_get_lpi_status_n500, +static struct rnpgbe_mac_operations mac_ops_rnpgbe = { + .set_mac_rx = &rnpgbe_mac_set_rx, + .set_mac_speed = &rnpgbe_mac_set_speed, + .set_mac_fcs = &rnpgbe_mac_fcs, + .set_fc_mode = &rnpgbe_mac_fc_mode, + .check_link = &rnpgbe_mac_check_link, + .set_mac = &rnpgbe_mac_set_mac, + .mdio_write = &rnpgbe_mac_mdio_write, + .mdio_read = &rnpgbe_mac_mdio_read, + .pmt = &rnpgbe_mac_pmt, + .set_eee_mode = rnpgbe_mac_set_eee_mode, + .reset_eee_mode = rnpgbe_mac_reset_eee_mode, + .set_eee_timer = rnpgbe_mac_set_eee_timer, + .set_eee_pls = rnpgbe_mac_set_eee_pls, + .get_lpi_status = rnpgbe_mac_get_lpi_status, }; @@ -3736,20 +4082,20 @@ static s32 rnpgbe_get_invariants_n500(struct rnpgbe_hw *hw) struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back; int i; - nic->nic_base_addr = hw->hw_addr + RNP500_NIC_BASE; + nic->nic_base_addr = hw->hw_addr + RNPGBE_NIC_BASE; /* setup dma info */ dma->dma_base_addr = hw->hw_addr; - dma->dma_ring_addr = hw->hw_addr + RNP500_RING_BASE; + dma->dma_ring_addr = hw->hw_addr + RNPGBE_RING_BASE; dma->max_tx_queues = RNP_N500_MAX_TX_QUEUES; dma->max_rx_queues = RNP_N500_MAX_RX_QUEUES; 
dma->back = hw; - memcpy(&hw->dma.ops, &dma_ops_n500, sizeof(hw->dma.ops)); + memcpy(&hw->dma.ops, &dma_ops_rnpgbe, sizeof(hw->dma.ops)); /* setup eth info */ - memcpy(&hw->eth.ops, ð_ops_n500, sizeof(hw->eth.ops)); - eth->eth_base_addr = hw->hw_addr + RNP500_ETH_BASE; + memcpy(&hw->eth.ops, ð_ops_rnpgbe, sizeof(hw->eth.ops)); + eth->eth_base_addr = hw->hw_addr + RNPGBE_ETH_BASE; eth->back = hw; - eth->mc_filter_type = 4; + eth->mc_filter_type = 0; eth->mcft_size = RNP_N500_MC_TBL_SIZE; eth->vft_size = RNP_N500_VFT_TBL_SIZE; eth->num_rar_entries = RNP_N500_RAR_ENTRIES + NCSI_RAR_NUM; @@ -3757,11 +4103,11 @@ static s32 rnpgbe_get_invariants_n500(struct rnpgbe_hw *hw) eth->max_tx_queues = RNP_N500_MAX_TX_QUEUES; /* setup mac info */ - memcpy(&hw->mac.ops, &mac_ops_n500, sizeof(hw->mac.ops)); - mac->mac_addr = hw->hw_addr + RNP500_MAC_BASE; + memcpy(&hw->mac.ops, &mac_ops_rnpgbe, sizeof(hw->mac.ops)); + mac->mac_addr = hw->hw_addr + RNPGBE_MAC_BASE; mac->back = hw; mac->mac_type = mac_dwc_g; - mac->mc_filter_type = 4; + mac->mc_filter_type = 0; mac->mcft_size = 2; mac->vft_size = 1; mac->num_rar_entries = RNP_N500_RAR_ENTRIES; @@ -3798,14 +4144,14 @@ static s32 rnpgbe_get_invariants_n500(struct rnpgbe_hw *hw) /* setup some fdir resource */ hw->min_length = RNP_MIN_MTU; - hw->max_length = RNP500_MAX_JUMBO_FRAME_SIZE; + hw->max_length = RNPGBE_MAX_JUMBO_FRAME_SIZE; hw->max_msix_vectors = RNP_N500_MSIX_VECTORS; hw->num_rar_entries = RNP_N500_RAR_ENTRIES; hw->fdir_mode = fdir_mode_tuple5; hw->max_vfs = RNP_N500_MAX_VF; hw->max_vfs_noari = 1; - hw->layer2_count = RNP500_MAX_LAYER2_FILTERS - 1; - hw->tuple5_count = RNP500_MAX_TUPLE5_FILTERS - 1; + hw->layer2_count = RNPGBE_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNPGBE_MAX_TUPLE5_FILTERS - 1; /* n500 support magic wol */ hw->wol_supported = WAKE_MAGIC; hw->num_vebvlan_entries = 8; @@ -3817,7 +4163,7 @@ static s32 rnpgbe_get_invariants_n500(struct rnpgbe_hw *hw) hw->sriov_ring_limit = 1; hw->max_pf_macvlans = 
RNP_MAX_PF_MACVLANS_N500; hw->veb_ring = RNP_N500_MAX_RX_QUEUES - 1; - memcpy(&hw->ops, &hw_ops_n500, sizeof(hw->ops)); + memcpy(&hw->ops, &hw_ops_rnpgbe, sizeof(hw->ops)); hw->supported_link = RNP_LINK_SPEED_1GB_FULL; /* mbx setup */ @@ -3830,13 +4176,14 @@ static s32 rnpgbe_get_invariants_n500(struct rnpgbe_hw *hw) mbx->pf_vf_mbox_mask_hi = 0; mbx->cpu_pf_shm_base = 0x2d000; mbx->pf2cpu_mbox_ctrl = 0x2e000; - mbx->pf2cpu_mbox_mask = 0x2e200; + mbx->cpu_pf_mbox_mask = 0x2e200; mbx->cpu_vf_share_ram = 0x2b000; mbx->share_size = 512; adapter->priv_flags |= RNP_PRIV_FLAG_PAUSE_OWN; adapter->drop_time = 100; + hw->msix_vector_base = 0x28200; /*initialization default pause flow */ hw->fc.requested_mode = PAUSE_AUTO; hw->fc.pause_time = RNP_DEFAULT_FCPAUSE; @@ -3845,8 +4192,8 @@ static s32 rnpgbe_get_invariants_n500(struct rnpgbe_hw *hw) /* we start from auto mode */ hw->tp_mdix_ctrl = ETH_TP_MDI_AUTO; for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { - hw->fc.high_water[i] = RNP500_DEFAULT_HIGH_WATER; - hw->fc.low_water[i] = RNP500_DEFAULT_LOW_WATER; + hw->fc.high_water[i] = RNPGBE_DEFAULT_HIGH_WATER; + hw->fc.low_water[i] = RNPGBE_DEFAULT_LOW_WATER; } hw->eeprom.word_size = 10; @@ -3863,20 +4210,20 @@ static s32 rnpgbe_get_invariants_n210(struct rnpgbe_hw *hw) struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back; int i; - nic->nic_base_addr = hw->hw_addr + RNP500_NIC_BASE; + nic->nic_base_addr = hw->hw_addr + RNPGBE_NIC_BASE; /* setup dma info */ dma->dma_base_addr = hw->hw_addr; - dma->dma_ring_addr = hw->hw_addr + RNP500_RING_BASE; + dma->dma_ring_addr = hw->hw_addr + RNPGBE_RING_BASE; dma->max_tx_queues = RNP_N500_MAX_TX_QUEUES; dma->max_rx_queues = RNP_N500_MAX_RX_QUEUES; dma->back = hw; - memcpy(&hw->dma.ops, &dma_ops_n500, sizeof(hw->dma.ops)); + memcpy(&hw->dma.ops, &dma_ops_rnpgbe, sizeof(hw->dma.ops)); /* setup eth info */ - memcpy(&hw->eth.ops, ð_ops_n500, sizeof(hw->eth.ops)); - eth->eth_base_addr = hw->hw_addr + RNP500_ETH_BASE; + 
memcpy(&hw->eth.ops, ð_ops_rnpgbe, sizeof(hw->eth.ops)); + eth->eth_base_addr = hw->hw_addr + RNPGBE_ETH_BASE; eth->back = hw; - eth->mc_filter_type = 4; + eth->mc_filter_type = 0; eth->mcft_size = RNP_N500_MC_TBL_SIZE; eth->vft_size = RNP_N500_VFT_TBL_SIZE; eth->num_rar_entries = RNP_N500_RAR_ENTRIES + NCSI_RAR_NUM; @@ -3884,12 +4231,12 @@ static s32 rnpgbe_get_invariants_n210(struct rnpgbe_hw *hw) eth->max_tx_queues = RNP_N500_MAX_TX_QUEUES; /* setup mac info */ - memcpy(&hw->mac.ops, &mac_ops_n500, sizeof(hw->mac.ops)); - mac->mac_addr = hw->hw_addr + RNP500_MAC_BASE; + memcpy(&hw->mac.ops, &mac_ops_rnpgbe, sizeof(hw->mac.ops)); + mac->mac_addr = hw->hw_addr + RNPGBE_MAC_BASE; mac->back = hw; mac->mac_type = mac_dwc_g; /* move this to eth todo */ - mac->mc_filter_type = 4; + mac->mc_filter_type = 0; mac->mcft_size = 2; mac->vft_size = 1; mac->num_rar_entries = RNP_N500_RAR_ENTRIES; @@ -3923,14 +4270,14 @@ static s32 rnpgbe_get_invariants_n210(struct rnpgbe_hw *hw) /* setup some fdir resource */ hw->min_length = RNP_MIN_MTU; - hw->max_length = RNP500_MAX_JUMBO_FRAME_SIZE; + hw->max_length = RNPGBE_MAX_JUMBO_FRAME_SIZE; hw->max_msix_vectors = RNP_N500_MSIX_VECTORS; hw->num_rar_entries = RNP_N500_RAR_ENTRIES; hw->fdir_mode = fdir_mode_tuple5; hw->max_vfs = RNP_N500_MAX_VF; - hw->max_vfs_noari = 1; - hw->layer2_count = RNP500_MAX_LAYER2_FILTERS - 1; - hw->tuple5_count = RNP500_MAX_TUPLE5_FILTERS - 1; + hw->max_vfs_noari = 7; + hw->layer2_count = RNPGBE_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNPGBE_MAX_TUPLE5_FILTERS - 1; hw->wol_supported = WAKE_MAGIC; hw->num_vebvlan_entries = 8; hw->default_rx_queue = 0; @@ -3941,7 +4288,7 @@ static s32 rnpgbe_get_invariants_n210(struct rnpgbe_hw *hw) hw->sriov_ring_limit = 1; hw->max_pf_macvlans = RNP_MAX_PF_MACVLANS_N500; hw->veb_ring = RNP_N500_MAX_RX_QUEUES - 1; - memcpy(&hw->ops, &hw_ops_n500, sizeof(hw->ops)); + memcpy(&hw->ops, &hw_ops_rnpgbe, sizeof(hw->ops)); hw->supported_link = RNP_LINK_SPEED_1GB_FULL; 
mbx->vf2pf_mbox_vec_base = 0x29200; @@ -3953,13 +4300,14 @@ static s32 rnpgbe_get_invariants_n210(struct rnpgbe_hw *hw) mbx->pf_vf_mbox_mask_hi = 0; mbx->cpu_pf_shm_base = 0x2d900; mbx->pf2cpu_mbox_ctrl = 0x2e900; - mbx->pf2cpu_mbox_mask = 0x2eb00; + mbx->cpu_pf_mbox_mask = 0x2eb00; mbx->cpu_vf_share_ram = 0x2b900; mbx->share_size = 512; adapter->priv_flags |= RNP_PRIV_FLAG_PAUSE_OWN; adapter->drop_time = 100; + hw->msix_vector_base = 0x28000; /*initialization default pause flow */ /* we start from auto */ hw->fc.requested_mode = PAUSE_AUTO; @@ -3968,8 +4316,8 @@ static s32 rnpgbe_get_invariants_n210(struct rnpgbe_hw *hw) hw->tp_mdix_ctrl = ETH_TP_MDI_AUTO; for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { - hw->fc.high_water[i] = RNP500_DEFAULT_HIGH_WATER; - hw->fc.low_water[i] = RNP500_DEFAULT_LOW_WATER; + hw->fc.high_water[i] = RNPGBE_DEFAULT_HIGH_WATER; + hw->fc.low_water[i] = RNPGBE_DEFAULT_LOW_WATER; } hw->eeprom.word_size = 10; @@ -3983,9 +4331,9 @@ struct rnpgbe_info rnpgbe_n500_info = { .rss_type = rnpgbe_rss_n500, .hw_type = rnpgbe_hw_n500, .get_invariants = &rnpgbe_get_invariants_n500, - .mac_ops = &mac_ops_n500, + .mac_ops = &mac_ops_rnpgbe, .eeprom_ops = NULL, - .mbx_ops = &mbx_ops_generic, + .mbx_ops = &rnpgbe_mbx_ops_generic, }; struct rnpgbe_info rnpgbe_n210_info = { @@ -3995,7 +4343,19 @@ struct rnpgbe_info rnpgbe_n210_info = { .rss_type = rnpgbe_rss_n500, .hw_type = rnpgbe_hw_n210, .get_invariants = &rnpgbe_get_invariants_n210, - .mac_ops = &mac_ops_n500, + .mac_ops = &mac_ops_rnpgbe, + .eeprom_ops = NULL, + .mbx_ops = &rnpgbe_mbx_ops_generic, +}; + +struct rnpgbe_info rnpgbe_n210L_info = { + .one_pf_with_two_dma = false, + .total_queue_pair_cnts = RNP_N500_MAX_TX_QUEUES, + .adapter_cnt = 1, + .rss_type = rnpgbe_rss_n500, + .hw_type = rnpgbe_hw_n210L, + .get_invariants = &rnpgbe_get_invariants_n210, + .mac_ops = &mac_ops_rnpgbe, .eeprom_ops = NULL, - .mbx_ops = &mbx_ops_generic, + .mbx_ops = &rnpgbe_mbx_ops_generic, }; diff --git 
a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.h index 18a848c4047f..2b9f6b8a88b6 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.h @@ -83,28 +83,24 @@ static inline unsigned int rnpgbe_rd_reg(void *reg) iowrite32((val), (void *)(reg)); \ } while (0) #else -#define rnpgbe_rd_reg(reg) readl((void *)(reg)) -#define rnpgbe_wr_reg(reg, val) writel((val), (void *)(reg)) +#define rnpgbe_rd_reg(reg) readl(reg) +#define rnpgbe_wr_reg(reg, val) writel((val), reg) #endif -#define rd32(hw, off) rnpgbe_rd_reg((hw)->hw_addr + (off)) -#define wr32(hw, off, val) rnpgbe_wr_reg((hw)->hw_addr + (off), (val)) - #define nic_rd32(nic, off) rnpgbe_rd_reg((nic)->nic_base_addr + (off)) #define nic_wr32(nic, off, val) \ rnpgbe_wr_reg((nic)->nic_base_addr + (off), (val)) -#define dma_rd32(dma, off) rnpgbe_rd_reg((dma)->dma_base_addr + (off)) -#define dma_wr32(dma, off, val) \ - rnpgbe_wr_reg((dma)->dma_base_addr + (off), (val)) - #define dma_ring_rd32(dma, off) rnpgbe_rd_reg((dma)->dma_ring_addr + (off)) #define dma_ring_wr32(dma, off, val) \ rnpgbe_wr_reg((dma)->dma_ring_addr + (off), (val)) -#define eth_rd32(eth, off) rnpgbe_rd_reg((eth)->eth_base_addr + (off)) -#define eth_wr32(eth, off, val) \ - rnpgbe_wr_reg((eth)->eth_base_addr + (off), (val)) +u32 hw_rd32(struct rnpgbe_hw *hw, u32 off); +void hw_wr32(struct rnpgbe_hw *hw, u32 off, u32 val); +u32 dma_rd32(struct rnpgbe_dma_info *dma, u32 off); +void dma_wr32(struct rnpgbe_dma_info *dma, u32 off, u32 val); +u32 eth_rd32(struct rnpgbe_eth_info *eth, u32 off); +void eth_wr32(struct rnpgbe_eth_info *eth, u32 off, u32 val); #define mac_rd32(mac, off) rnpgbe_rd_reg((mac)->mac_addr + (off)) #define mac_wr32(mac, off, val) rnpgbe_wr_reg((mac)->mac_addr + (off), (val)) @@ -162,7 +158,7 @@ static inline unsigned int rnpgbe_rd_reg_1(int ring, u32 off, void *reg) ((NETIF_MSG_##nlevel & adapter->msg_enable) ? 
\ (void)(netdev_printk(KERN_##klevel, adapter->netdev, fmt, \ ##args)) : \ - NULL) + (void)0) /* ==== log helper === */ #ifdef HW_DEBUG diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_debugfs.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_debugfs.c index bf77025b370a..4746a9353d75 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_debugfs.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_debugfs.c @@ -214,6 +214,94 @@ static const struct file_operations rnpgbe_dbg_netdev_ops_fops = { .write = rnpgbe_dbg_netdev_ops_write, }; +static void debugfs_command_help(struct device *dev, char *cmd_buf) +{ + dev_info(dev, "unknown or invalid command '%s'\n", cmd_buf); + dev_info(dev, "available commands\n"); + dev_info(dev, "\t dump ring\n"); + dev_info(dev, "\t dump dma\n"); + dev_info(dev, "\t dump tx\n"); + dev_info(dev, "\t dump rx\n"); +} + +/** + * rnpgbe_dbg_command_write - write into netdev_ops datum + * @file: the opened file + * @buf: where to find the user's data + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t rnpgbe_dbg_command_write(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct rnpgbe_adapter *adapter = file->private_data; + struct device *dev = &adapter->pdev->dev; + struct rnpgbe_hw *hw = &adapter->hw; + char *cmd_buf, *cmd_buf_tmp; + ssize_t ret; + char **argv; + int argc; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + cmd_buf = memdup_user(buf, count + 1); + if (IS_ERR(cmd_buf)) + return PTR_ERR(cmd_buf); + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = (size_t)cmd_buf_tmp - (size_t)cmd_buf + 1; + } + + argv = argv_split(GFP_KERNEL, cmd_buf, &argc); + if (!argv) { + ret = -ENOMEM; + goto err_copy_from_user; + } + + if (argc == 2 && !strncmp(argv[0], "dump", 4)) { + ret = hw->ops.dump_debug_regs(hw, argv[1]); + if (ret) { + debugfs_command_help(dev, cmd_buf); + ret = 
-EINVAL; + goto command_write_error; + } + } else { + debugfs_command_help(dev, cmd_buf); + ret = -EINVAL; + goto command_write_error; + } + + /* if we get here, nothing went wrong; return bytes copied */ + ret = (ssize_t)count; + +command_write_error: + argv_free(argv); +err_copy_from_user: + kfree(cmd_buf); + + /* This function always consumes all of the written input, or produces + * an error. Check and enforce this. Otherwise, the write operation + * won't complete properly. + */ + if (WARN_ON(ret != (ssize_t)count && ret >= 0)) + ret = -EIO; + + return ret; +} + +static const struct file_operations rnpgbe_dbg_command_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = NULL, + .write = rnpgbe_dbg_command_write, +}; + static ssize_t rnpgbe_dbg_netdev_temp_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) @@ -258,7 +346,7 @@ static const struct file_operations rnpgbe_dbg_netdev_temp = { **/ void rnpgbe_dbg_adapter_init(struct rnpgbe_adapter *adapter) { - const char *name = adapter->name; + const char *name = pci_name(adapter->pdev); struct dentry *pfile; adapter->rnpgbe_dbg_adapter = debugfs_create_dir(name, rnpgbe_dbg_root); @@ -280,6 +368,13 @@ void rnpgbe_dbg_adapter_init(struct rnpgbe_adapter *adapter) adapter, &rnpgbe_dbg_netdev_temp); if (!pfile) e_dev_err("debugfs temp for %s failed\n", name); + + pfile = debugfs_create_file("command", 0600, + adapter->rnpgbe_dbg_adapter, + adapter, &rnpgbe_dbg_command_fops); + if (!pfile) + e_dev_err("debugfs temp for command failed\n"); + } else { e_dev_err("debugfs entry for %s failed\n", name); } diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.c index 7df6350c2c74..05ce63e43b06 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.c @@ -9,6 +9,7 @@ #include #include #include +#include "rnpgbe_common.h" #include #include #include @@ -219,7 +220,7 @@ 
static bool rnpgbe_reg_test(struct rnpgbe_adapter *adapter, u64 *data) test->mask, test->write); break; case WRITE_NO_TEST: - wr32(hw, test->reg + (i * 0x40), test->write); + hw_wr32(hw, test->reg + (i * 0x40), test->write); break; case TABLE32_TEST: b = reg_pattern_test(adapter, data, @@ -277,11 +278,12 @@ void rnpgbe_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, if (adapter->vfinfo[i].clear_to_send) { netdev_warn(netdev, "%s", "offline diagnostic is not supported when VFs are present\n"); - data[0] = 1; - data[1] = 1; - data[2] = 1; - data[3] = 1; - eth_test->flags |= ETH_TEST_FL_FAILED; + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + if (rnpgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; clear_bit(__RNP_TESTING, &adapter->state); goto skip_ol_tests; @@ -396,12 +398,10 @@ int rnpgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) /*For we just set it as pf0 */ if (!(adapter->flags2 & RNP_FLAG2_PTP_ENABLED)) return ethtool_op_get_ts_info(dev, info); - if (adapter->ptp_clock) info->phc_index = ptp_clock_index(adapter->ptp_clock); else info->phc_index = -1; - info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_TX_SOFTWARE | @@ -767,9 +767,14 @@ int rnpgbe_set_ringparam(struct net_device *netdev, for (i = 0; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reset_count = new_rx_count; } - if (hw->ops.driver_status) + + /* if now we are in force mode, never need force, if not force it */ + if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { + hw->ops.set_mac_rx(hw, false); hw->ops.driver_status(hw, true, - rnpgbe_driver_force_control_mac); + rnpgbe_driver_force_control_phy); + } + rnpgbe_down(adapter); /* Setup new Tx resources and free the old Tx resources in that order. * We can then assign the new resources to the rings via a memcpy. 
@@ -829,9 +834,10 @@ int rnpgbe_set_ringparam(struct net_device *netdev, err_setup: rnpgbe_up(adapter); vfree(temp_ring); - if (hw->ops.driver_status) + if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { hw->ops.driver_status(hw, false, - rnpgbe_driver_force_control_mac); + rnpgbe_driver_force_control_phy); + } clear_reset: clear_bit(__RNP_RESETTING, &adapter->state); return err; @@ -1132,7 +1138,7 @@ static int rnpgbe_get_ethtool_fdir_entry(struct rnpgbe_adapter *adapter, fsp->flow_type = ETHER_FLOW; /* support proto and mask only in this mode */ fsp->h_u.ether_spec.h_proto = rule->filter.layer2_formate.proto; - fsp->m_u.ether_spec.h_proto = 0xffff; + fsp->m_u.ether_spec.h_proto = htons(0xffff); break; default: return -EINVAL; @@ -1148,10 +1154,10 @@ static int rnpgbe_get_ethtool_fdir_entry(struct rnpgbe_adapter *adapter, rule->filter.formatted.src_ip[0]; fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; - fsp->m_u.tcp_ip4_spec.psrc = 0xffff; - fsp->m_u.tcp_ip4_spec.pdst = 0xffff; - fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; - fsp->m_u.tcp_ip4_spec.ip4dst = 0xffffffff; + fsp->m_u.tcp_ip4_spec.psrc = htons(0xffff); + fsp->m_u.tcp_ip4_spec.pdst = htons(0xffff); + fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xffffffff); + fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xffffffff); } else { fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port & @@ -1391,12 +1397,12 @@ static int rnpgbe_flowspec_to_flow_type(struct rnpgbe_adapter *adapter, ret = 0; } if (fsp->h_u.usr_ip4_spec.ip4src != 0 && - fsp->m_u.usr_ip4_spec.ip4src != 0xffffffff) { + fsp->m_u.usr_ip4_spec.ip4src != htonl(0xffffffff)) { e_err(drv, "ip src mask error\n"); ret = 0; } if (fsp->h_u.usr_ip4_spec.ip4dst != 0 && - fsp->m_u.usr_ip4_spec.ip4dst != 0xffffffff) { + fsp->m_u.usr_ip4_spec.ip4dst != htonl(0xffffffff)) { e_err(drv, "ip dst mask error\n"); ret = 0; } @@ -1425,22 +1431,22 @@ static int rnpgbe_flowspec_to_flow_type(struct rnpgbe_adapter *adapter, ret = 0; } if 
(fsp->h_u.tcp_ip4_spec.ip4src != 0 && - fsp->m_u.tcp_ip4_spec.ip4src != 0xffffffff) { + fsp->m_u.tcp_ip4_spec.ip4src != htonl(0xffffffff)) { e_err(drv, "src mask error\n"); ret = 0; } if (fsp->h_u.tcp_ip4_spec.ip4dst != 0 && - fsp->m_u.tcp_ip4_spec.ip4dst != 0xffffffff) { + fsp->m_u.tcp_ip4_spec.ip4dst != htonl(0xffffffff)) { e_err(drv, "dst mask error\n"); ret = 0; } if (fsp->h_u.tcp_ip4_spec.psrc != 0 && - fsp->m_u.tcp_ip4_spec.psrc != 0xffff) { + fsp->m_u.tcp_ip4_spec.psrc != htons(0xffff)) { e_err(drv, "src port mask error\n"); ret = 0; } if (fsp->h_u.tcp_ip4_spec.pdst != 0 && - fsp->m_u.tcp_ip4_spec.pdst != 0xffff) { + fsp->m_u.tcp_ip4_spec.pdst != htons(0xffff)) { e_err(drv, "src port mask error\n"); ret = 0; } @@ -1654,9 +1660,9 @@ static int rnpgbe_add_ethtool_fdir_entry(struct rnpgbe_adapter *adapter, input->filter.formatted.dst_ip_mask[0] = fsp->m_u.usr_ip4_spec.ip4dst; input->filter.formatted.src_port = 0; - input->filter.formatted.src_port_mask = 0xffff; + input->filter.formatted.src_port_mask = htons(0xffff); input->filter.formatted.dst_port = 0; - input->filter.formatted.dst_port_mask = 0xffff; + input->filter.formatted.dst_port_mask = htons(0xffff); input->filter.formatted.inner_mac[0] = fsp->h_u.usr_ip4_spec.proto; input->filter.formatted.inner_mac_mask[0] = @@ -1792,13 +1798,51 @@ int rnpgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) return 0; } -enum { - PART_FW, - PART_CFG, - PART_MACSN, - PART_PCSPHY, - PART_PXE, -}; +static int check_fw_type(struct rnpgbe_hw *hw, const u8 *data, int len) +{ + u32 device_id; + int ret = 0; + u32 crc32_goal; + u32 crc32 = 0xffffffff; + struct crc32_info *info = (struct crc32_info *)(data + CRC_OFFSET); + + if (info->magic == CRC32_MAGIC) { + crc32_goal = info->crc32; + info->crc32 = 0; + info->magic = 0; + + crc32 = crc32_le(crc32, data, len); + if (crc32 != crc32_goal) + return -1; + info->magic = CRC32_MAGIC; + info->crc32 = crc32_goal; + } + + device_id = *((u16 *)data + 30); + + /* if 
no device_id no check */ + if (device_id == 0 || device_id == 0xffff) + return 0; + + switch (hw->hw_type) { + case rnpgbe_hw_n500: + if (device_id != 0x8308) + ret = 1; + break; + case rnpgbe_hw_n210: + if (device_id != 0x8208) + ret = 1; + break; + case rnpgbe_hw_n210L: + if (device_id != 0x820a) + ret = 1; + break; + default: + ret = 1; + } + + return ret; +} static int rnpgbe_flash_firmware(struct rnpgbe_adapter *adapter, int region, const u8 *data, int bytes) @@ -1809,26 +1853,14 @@ static int rnpgbe_flash_firmware(struct rnpgbe_adapter *adapter, int region, case PART_FW: if (*((u32 *)(data)) != 0xa55aa55a) return -EINVAL; - break; - case PART_CFG: - if (*((u32 *)(data)) != 0x00010cf9) - return -EINVAL; - break; - case PART_MACSN: - break; - case PART_PCSPHY: - if (*((u16 *)(data)) != 0x081d) - return -EINVAL; - break; - case PART_PXE: - if (*((u16 *)(data)) != 0xaa55) + if (check_fw_type(hw, data, bytes)) return -EINVAL; break; default: return -EINVAL; } - return rnp500_fw_update(hw, region, data, bytes); + return rnpgbe_fw_update(hw, region, data, bytes); } static int rnpgbe_flash_firmware_from_file(struct net_device *dev, diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c index a49791dfdb42..96eefde489eb 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c @@ -625,7 +625,7 @@ static void rnpgbe_free_q_vectors(struct rnpgbe_adapter *adapter) rnpgbe_free_q_vector(adapter, v_idx); } -void rnpgbe_reset_interrupt_capability(struct rnpgbe_adapter *adapter) +static void rnpgbe_reset_interrupt_capability(struct rnpgbe_adapter *adapter) { if (adapter->flags & RNP_FLAG_MSIX_ENABLED) pci_disable_msix(adapter->pdev); @@ -648,7 +648,7 @@ void rnpgbe_reset_interrupt_capability(struct rnpgbe_adapter *adapter) * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. 
**/ -int rnpgbe_set_interrupt_capability(struct rnpgbe_adapter *adapter) +static int rnpgbe_set_interrupt_capability(struct rnpgbe_adapter *adapter) { struct rnpgbe_hw *hw = &adapter->hw; int vector, v_budget, err = 0; @@ -805,9 +805,9 @@ void rnpgbe_clear_interrupt_scheme(struct rnpgbe_adapter *adapter) * **/ -void rnpgbe_tx_ctxtdesc(struct rnpgbe_ring *tx_ring, u32 mss_len_vf_num, - u32 inner_vlan_tunnel_len, int ignore_vlan, - bool crc_pad) +static void rnpgbe_tx_ctxtdesc(struct rnpgbe_ring *tx_ring, u32 mss_len_vf_num, + u32 inner_vlan_tunnel_len, int ignore_vlan, + bool crc_pad) { struct rnpgbe_tx_ctx_desc *context_desc; u16 i = tx_ring->next_to_use; @@ -841,7 +841,7 @@ void rnpgbe_tx_ctxtdesc(struct rnpgbe_ring *tx_ring, u32 mss_len_vf_num, if (tx_ring->q_vector->adapter->flags & RNP_FLAG_SRIOV_ENABLED) { if (ignore_vlan) { context_desc->inner_vlan_tunnel_len |= - VF_VEB_IGNORE_VLAN; + cpu_to_le32(VF_VEB_IGNORE_VLAN); } } buf_dump_line("ctx ", __LINE__, context_desc, sizeof(*context_desc)); diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c index 2a891bb35078..6545d17eb01a 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -42,7 +43,8 @@ char rnpgbe_driver_name[] = "rnpgbe"; static const char rnpgbe_driver_string[] = "mucse 1 Gigabit PCI Express Network Driver"; -#define DRV_VERSION "0.2.1-rc5" +#define DRV_VERSION "1.0.0" +static u32 driver_version = 0x01000000; #include "version.h" const char rnpgbe_driver_version[] = DRV_VERSION GIT_COMMIT; @@ -52,6 +54,7 @@ static const char rnpgbe_copyright[] = static struct rnpgbe_info *rnpgbe_info_tbl[] = { [board_n500] = &rnpgbe_n500_info, [board_n210] = &rnpgbe_n210_info, + [board_n210L] = &rnpgbe_n210L_info, }; static int register_mbx_irq(struct rnpgbe_adapter *adapter); @@ -76,11 +79,13 @@ static void 
rnpgbe_put_rx_buffer(struct rnpgbe_ring *rx_ring, static struct pci_device_id rnpgbe_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N500_QUAD_PORT), - .driver_data = board_n500 }, /* n500 */ + .driver_data = board_n500 }, /* n500 */ { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N500_DUAL_PORT), - .driver_data = board_n500 }, /* n500 */ + .driver_data = board_n500 }, /* n500 */ { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N210), - .driver_data = board_n210 }, /* n210 */ + .driver_data = board_n210 }, /* n210 */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N210L), + .driver_data = board_n210L }, /* n210L */ /* required last entry */ { 0, @@ -95,7 +100,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static unsigned int module_enable_ptp = 1; module_param(module_enable_ptp, uint, 0000); -MODULE_PARM_DESC(module_enable_ptp, "enable ptp, disabled default"); +MODULE_PARM_DESC(module_enable_ptp, "enable ptp, enabled default"); MODULE_AUTHOR("Mucse Corporation, "); MODULE_DESCRIPTION("Mucse(R) 1 Gigabit PCI Express Network Driver"); @@ -111,7 +116,85 @@ static int enable_hi_dma; static void rnpgbe_service_timer(struct timer_list *t); static void rnpgbe_setup_eee_mode(struct rnpgbe_adapter *adapter, bool status); -static void rnpgbe_service_event_schedule(struct rnpgbe_adapter *adapter) +u32 hw_rd32(struct rnpgbe_hw *hw, u32 off) +{ + u8 __iomem *addr = READ_ONCE(hw->hw_addr); + struct device *dev = &hw->pdev->dev; + u32 value = 0; + + if (unlikely(!(addr))) + return ~value; + + value = readl(addr + off); + if (!(~value)) + dev_info(dev, "maybe pcie link lost? 
hw %x\n", off); + return value; +} + +void hw_wr32(struct rnpgbe_hw *hw, u32 off, u32 val) +{ + u8 __iomem *addr = READ_ONCE(hw->hw_addr); + + writel((val), addr + off); +} + +u32 dma_rd32(struct rnpgbe_dma_info *dma, u32 off) +{ + struct rnpgbe_hw *hw = (struct rnpgbe_hw *)dma->back; + u8 __iomem *addr = READ_ONCE(dma->dma_base_addr); + struct device *dev = &hw->pdev->dev; + u32 value = 0; + + if (unlikely(!(addr))) + return ~value; + + value = readl(addr + off); + if (!(~value)) { + hw->hw_addr = NULL; + hw->dma.dma_base_addr = NULL; + hw->eth.eth_base_addr = NULL; + hw->mac.mac_addr = NULL; + dev_info(dev, "maybe pcie link lost ??\n"); + } + return value; +} + +void dma_wr32(struct rnpgbe_dma_info *dma, u32 off, u32 val) +{ + u8 __iomem *addr = READ_ONCE(dma->dma_base_addr); + + writel((val), addr + off); +} + +u32 eth_rd32(struct rnpgbe_eth_info *eth, u32 off) +{ + struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back; + u8 __iomem *addr = READ_ONCE(eth->eth_base_addr); + struct device *dev = &hw->pdev->dev; + u32 value = 0; + + if (unlikely(!(addr))) + return ~value; + + value = readl(addr + off); + if (!(~value)) { + hw->hw_addr = NULL; + hw->dma.dma_base_addr = NULL; + hw->eth.eth_base_addr = NULL; + hw->mac.mac_addr = NULL; + dev_info(dev, "maybe pcie link lost ?\n"); + } + return value; +} + +void eth_wr32(struct rnpgbe_eth_info *eth, u32 off, u32 val) +{ + u8 __iomem *addr = READ_ONCE(eth->eth_base_addr); + + writel((val), addr + off); +} + +void rnpgbe_service_event_schedule(struct rnpgbe_adapter *adapter) { if (!test_bit(__RNP_DOWN, &adapter->state) && !test_and_set_bit(__RNP_SERVICE_SCHED, &adapter->state)) @@ -433,7 +516,7 @@ static inline void rnpgbe_rx_hash(struct rnpgbe_ring *ring, if (!(ring->netdev->features & NETIF_F_RXHASH)) return; #define RNP_RSS_TYPE_MASK 0xc0 - rss_type = rx_desc->wb.cmd & RNP_RSS_TYPE_MASK; + rss_type = le16_to_cpu(rx_desc->wb.cmd & cpu_to_le16(RNP_RSS_TYPE_MASK)); skb_set_hash(skb, le32_to_cpu(rx_desc->wb.rss_hash), 
rss_type ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } @@ -519,6 +602,29 @@ static inline int rnpgbe_skb_pad(void) #define RNP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) #endif /* PAGE_SIZE < 8192 */ +static void handle_other_stags(struct sk_buff *skb, u16 vid, + struct rnpgbe_adapter *adapter) +{ + /* should consider other stags */ + switch (adapter->outer_vlan_type) { + case outer_vlan_type_88a8: + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), + vid); + break; + case outer_vlan_type_9100: + __vlan_hwaccel_put_tag(skb, htons(ETH_P_QINQ1), + vid); + break; + case outer_vlan_type_9200: + __vlan_hwaccel_put_tag(skb, htons(ETH_P_QINQ2), + vid); + break; + default: + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), + vid); + break; + } +} /** * rnpgbe_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on @@ -534,48 +640,23 @@ static void rnpgbe_process_skb_fields(struct rnpgbe_ring *rx_ring, struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; - struct rnpgbe_adapter *adapter = netdev_priv(dev); - struct rnpgbe_hw *hw = &adapter->hw; + struct rnpgbe_adapter *adapter; + struct rnpgbe_hw *hw; + adapter = netdev_priv(dev); + hw = &adapter->hw; rnpgbe_rx_hash(rx_ring, rx_desc, skb); rnpgbe_rx_checksum(rx_ring, rx_desc, skb); - /* if ncsi with stags on */ - if (hw->ncsi_en && (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED)) { - /* check outer stags with set one */ - u8 header[ETH_ALEN + ETH_ALEN]; - u8 *data = skb->data; - struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; - - if (veth->h_vlan_proto != htons(ETH_P_8021AD)) - goto skip_vlan; - - if (veth->h_vlan_TCI != htons(adapter->stags_vid)) - goto skip_vlan; - - memcpy(header, data, ETH_ALEN + ETH_ALEN); - memcpy(skb->data + 4, header, ETH_ALEN + ETH_ALEN); - skb->len -= 4; - skb->data += 4; - goto skip_vlan; - } - - if (!(((dev->features & NETIF_F_HW_VLAN_CTAG_RX) || - (dev->features & NETIF_F_HW_VLAN_STAG_RX)) && - 
rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_VLAN_VALID) && - !ignore_veb_vlan(adapter, rx_desc))) - goto skip_vlan; - /* check outer vlan first */ - if (rnpgbe_test_ext_cmd(rx_desc, REV_OUTER_VLAN)) { - u16 vid_inner = le16_to_cpu(rx_desc->wb.vlan); - u16 vid_outer; - u16 vlan_tci = htons(ETH_P_8021Q); - - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid_inner); + if (hw->ncsi_en) { + /* if ncsi with stags on */ + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) { + u8 header[ETH_ALEN + ETH_ALEN]; + u8 *data = skb->data; + struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; + __be16 vlan_tci; - /* check outer vlan type */ - if (rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_STAG)) { switch (adapter->outer_vlan_type) { case outer_vlan_type_88a8: vlan_tci = htons(ETH_P_8021AD); @@ -590,59 +671,97 @@ static void rnpgbe_process_skb_fields(struct rnpgbe_ring *rx_ring, vlan_tci = htons(ETH_P_8021AD); break; } - } else { - vlan_tci = htons(ETH_P_8021Q); + + if (veth->h_vlan_proto != vlan_tci) + goto skip_vlan; + + if ((be16_to_cpu(veth->h_vlan_TCI) & 0x0fff) != adapter->stags_vid) + goto skip_vlan; + + memcpy(header, data, ETH_ALEN + ETH_ALEN); + memcpy(skb->data + 4, header, ETH_ALEN + ETH_ALEN); + skb->len -= 4; + skb->data += 4; + goto skip_vlan; + } - vid_outer = le16_to_cpu(rx_desc->wb.mark); - /* if in stags mode should ignore only stags */ - if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) { - /* push outer in if not equal stags or cvlan */ - if (vid_outer != adapter->stags_vid || - vlan_tci == htons(ETH_P_8021Q)) { + } + + if (((dev->features & NETIF_F_HW_VLAN_CTAG_RX) || + (dev->features & NETIF_F_HW_VLAN_STAG_RX)) && + rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_VLAN_VALID) && + !ignore_veb_vlan(rx_ring->q_vector->adapter, rx_desc)) { + if (rnpgbe_test_ext_cmd(rx_desc, REV_OUTER_VLAN)) { + u16 vid_inner = le16_to_cpu(rx_desc->wb.vlan); + u16 vid_outer; + __be16 vlan_tci = htons(ETH_P_8021Q); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + 
vid_inner); + if (rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_STAG)) { + switch (rx_ring->q_vector->adapter + ->outer_vlan_type) { + case outer_vlan_type_88a8: + vlan_tci = htons(ETH_P_8021AD); + break; + case outer_vlan_type_9100: + vlan_tci = htons(ETH_P_QINQ1); + break; + case outer_vlan_type_9200: + vlan_tci = htons(ETH_P_QINQ2); + break; + default: + vlan_tci = htons(ETH_P_8021AD); + break; + } + } else { + vlan_tci = htons(ETH_P_8021Q); + } + vid_outer = le16_to_cpu(rx_desc->wb.mark); + /* if in stags mode should ignore only stags */ + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) { + /* push outer in if not equal stags or cvlan */ + if (vid_outer != adapter->stags_vid || + vlan_tci == htons(ETH_P_8021Q)) { + /* push outer inner */ + skb = __vlan_hwaccel_push_inside(skb); + __vlan_hwaccel_put_tag(skb, vlan_tci, + vid_outer); + /* if not 88a8, push again to avoid kernel crash + * todo + */ + } + /* if vid_outer is stags_vid do nothing */ + } else { + /* push outer */ skb = __vlan_hwaccel_push_inside(skb); __vlan_hwaccel_put_tag(skb, vlan_tci, vid_outer); } - } else { - skb = __vlan_hwaccel_push_inside(skb); - __vlan_hwaccel_put_tag(skb, vlan_tci, vid_outer); - } - } else { - /* only inner vlan */ - u16 vid = le16_to_cpu(rx_desc->wb.vlan); - if (rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_STAG)) { - if ((adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) && - vid == adapter->stags_vid) - goto skip_outer_vlan; - switch (adapter->outer_vlan_type) { - case outer_vlan_type_88a8: - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), - vid); - break; - case outer_vlan_type_9100: - __vlan_hwaccel_put_tag(skb, htons(ETH_P_QINQ1), - vid); - break; - case outer_vlan_type_9200: - __vlan_hwaccel_put_tag(skb, htons(ETH_P_QINQ2), - vid); - break; - default: - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), + } else { + /* only inner vlan */ + u16 vid = le16_to_cpu(rx_desc->wb.vlan); + /* check vlan type */ + if (rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_STAG)) { + if 
((adapter->flags2 & + RNP_FLAG2_VLAN_STAGS_ENABLED) && + vid == adapter->stags_vid) { + /* do nothing ignore this stags */ + } else { + handle_other_stags(skb, vid, + adapter); + } + } else { + /* only do put if hw strip vlan */ + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); - break; } - } else { - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } + rx_ring->rx_stats.vlan_remove++; } -skip_outer_vlan: - rx_ring->rx_stats.vlan_remove++; - skip_vlan: skb_record_rx_queue(skb, rx_ring->queue_index); - skb->protocol = eth_type_trans(skb, dev); } @@ -680,23 +799,13 @@ static bool rnpgbe_check_csum_error(struct rnpgbe_ring *rx_ring, rx_ring->rx_stats.csum_err++; if ((!(netdev->flags & IFF_PROMISC) && - (!(netdev->features & NETIF_F_RXALL)))) { + (!(netdev->features & NETIF_F_RXALL)))) err = true; - goto skip_fix; - } } -skip_fix: + if (err) { u32 ntc = rx_ring->next_to_clean + 1; struct rnpgbe_rx_buffer *rx_buffer; -#if (PAGE_SIZE < 8192) - unsigned int truesize = rnpgbe_rx_pg_size(rx_ring) / 2; -#else - unsigned int truesize = - ring_uses_build_skb(rx_ring) ? 
- SKB_DATA_ALIGN(RNP_SKB_PAD + size) : - SKB_DATA_ALIGN(size); -#endif /* if eop add drop_packets */ if (likely(rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_EOP))) @@ -708,11 +817,8 @@ static bool rnpgbe_check_csum_error(struct rnpgbe_ring *rx_ring, rx_buffer->page_offset, size, DMA_FROM_DEVICE); -#if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; -#else - rx_buffer->page_offset += truesize; -#endif + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; #ifdef OPTM_WITH_LPAGE rnpgbe_put_rx_buffer(rx_ring, rx_buffer); @@ -726,46 +832,6 @@ static bool rnpgbe_check_csum_error(struct rnpgbe_ring *rx_ring, return err; } -/** - * rnpgbe_rx_ring_reinit - just reinit rx_ring with new count in ->reset_count - * @adapter: pointer to adapter struct - * @rx_ring: rx descriptor ring to transact packets on - */ -static int rnpgbe_rx_ring_reinit(struct rnpgbe_adapter *adapter, - struct rnpgbe_ring *rx_ring) -{ - struct rnpgbe_ring *temp_ring; - int err = 0; - - if (rx_ring->count == rx_ring->reset_count) - return 0; - - temp_ring = vmalloc(array_size(1, sizeof(struct rnpgbe_ring))); - if (!temp_ring) - return -1; - - /* stop rx queue */ - rnpgbe_disable_rx_queue(adapter, rx_ring); - memset(temp_ring, 0x00, sizeof(struct rnpgbe_ring)); - /* reinit for this ring */ - memcpy(temp_ring, rx_ring, sizeof(struct rnpgbe_ring)); - /* setup new count */ - temp_ring->count = rx_ring->reset_count; - err = rnpgbe_setup_rx_resources(temp_ring, adapter); - if (err) { - rnpgbe_free_rx_resources(temp_ring); - goto err_setup; - } - rnpgbe_free_rx_resources(rx_ring); - memcpy(rx_ring, temp_ring, sizeof(struct rnpgbe_ring)); - rnpgbe_configure_rx_ring(adapter, rx_ring); -err_setup: - vfree(temp_ring); - /* start rx */ - ring_wr32(rx_ring, RNP_DMA_RX_START, 1); - return 0; -} - static inline unsigned int rnpgbe_rx_offset(struct rnpgbe_ring *rx_ring) { return ring_uses_build_skb(rx_ring) ? 
RNP_SKB_PAD : 0; @@ -1075,16 +1141,17 @@ static bool rnpgbe_cleanup_headers(struct rnpgbe_ring __maybe_unused *rx_ring, * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ -void rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *rx_ring, u16 cleaned_count) +static bool rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *rx_ring, u16 cleaned_count) { union rnpgbe_rx_desc *rx_desc; struct rnpgbe_rx_buffer *bi; u16 i = rx_ring->next_to_use; u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24)); + bool err = false; u16 bufsz; /* nothing to do */ if (!cleaned_count) - return; + return err; rx_desc = RNP_RX_DESC(rx_ring, i); @@ -1102,8 +1169,15 @@ void rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *rx_ring, u16 cleaned_count) struct page *page; if (!rnpgbe_alloc_mapped_page(rx_ring, bi, rx_desc, bufsz, - fun_id)) + fun_id)) { + err = true; break; + } + { + u16 *data = page_address(bi->page) + bi->page_offset; + + *data = CHECK_DATA; + } page = bi->page; rx_desc->resv_cmd = 0; @@ -1178,6 +1252,8 @@ void rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *rx_ring, u16 cleaned_count) if (rx_ring->next_to_use != i) rnpgbe_update_rx_tail(rx_ring, i); + + return err; } static bool rnpgbe_alloc_mapped_page(struct rnpgbe_ring *rx_ring, @@ -1265,6 +1341,9 @@ static bool rnpgbe_is_non_eop(struct rnpgbe_ring *rx_ring, return false; /* place skb in next buffer to be received */ + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + return true; } @@ -1273,17 +1352,27 @@ rnpgbe_get_rx_buffer(struct rnpgbe_ring *rx_ring, union rnpgbe_rx_desc *rx_desc, const unsigned int size) { struct rnpgbe_rx_buffer *rx_buffer; + int time = 0; + u16 *data; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + data = page_address(rx_buffer->page) + rx_buffer->page_offset; prefetchw(rx_buffer->page); rx_buf_dump("rx buf", page_address(rx_buffer->page) + rx_buffer->page_offset, rx_desc->wb.len); - +try_sync: /* we are reusing so sync this buffer for CPU use 
*/ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, 0, size, DMA_FROM_DEVICE); + + if ((*data == CHECK_DATA) && time < 5) { + time++; + udelay(5); + rx_ring->rx_stats.rx_resync++; + goto try_sync; + } /* skip_sync: */ rx_buffer->pagecnt_bias--; @@ -1428,6 +1517,7 @@ static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector, struct sk_buff *skb = rx_ring->skb; struct rnpgbe_adapter *adapter = q_vector->adapter; u16 cleaned_count = rnpgbe_desc_unused_rx(rx_ring); + bool fail_alloc = false; while (likely(total_rx_packets < budget)) { union rnpgbe_rx_desc *rx_desc; @@ -1437,7 +1527,8 @@ static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector, /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= RNP_RX_BUFFER_WRITE) { - rnpgbe_alloc_rx_buffers(rx_ring, cleaned_count); + fail_alloc = rnpgbe_alloc_rx_buffers(rx_ring, cleaned_count) || + fail_alloc; cleaned_count = 0; } rx_desc = RNP_RX_DESC(rx_ring, rx_ring->next_to_clean); @@ -1511,6 +1602,7 @@ static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector, rx_buffer->pagecnt_bias++; break; } + if (module_enable_ptp && adapter->ptp_rx_en && adapter->flags2 & RNP_FLAG2_PTP_ENABLED) rnpgbe_ptp_get_rx_hwstamp(adapter, rx_desc, skb); @@ -1535,7 +1627,8 @@ static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector, /* populate checksum, timestamp, VLAN, and protocol */ rnpgbe_process_skb_fields(rx_ring, rx_desc, skb); - //rx_buf_dump("rx-data:", skb->data, skb->len); + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; rnpgbe_rx_skb(q_vector, skb); skb = NULL; @@ -1560,14 +1653,10 @@ static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector, q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; - //printk("clean rx irq %d\n", total_rx_packets); if (total_rx_packets >= budget) rx_ring->rx_stats.poll_again_count++; - //if (cleaned_count) - //rnpgbe_alloc_rx_buffers(rx_ring, cleaned_count); - - 
return total_rx_packets; + return fail_alloc ? budget : total_rx_packets; } /** @@ -1602,7 +1691,7 @@ static void rnpgbe_clean_rx_ring(struct rnpgbe_ring *rx_ring) /* free resources associated with mapping */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, - rnpgbe_rx_pg_size(rx_ring), + rnpgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE, RNP_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, @@ -1630,16 +1719,17 @@ static void rnpgbe_clean_rx_ring(struct rnpgbe_ring *rx_ring) * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ -void rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *rx_ring, u16 cleaned_count) +static bool rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *rx_ring, u16 cleaned_count) { union rnpgbe_rx_desc *rx_desc; struct rnpgbe_rx_buffer *bi; u16 i = rx_ring->next_to_use; u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24)); + bool err = false; u16 bufsz; /* nothing to do */ if (!cleaned_count) - return; + return err; rx_desc = RNP_RX_DESC(rx_ring, i); @@ -1653,8 +1743,10 @@ void rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *rx_ring, u16 cleaned_count) bufsz = rnpgbe_rx_bufsz(rx_ring); do { - if (!rnpgbe_alloc_mapped_page(rx_ring, bi)) + if (!rnpgbe_alloc_mapped_page(rx_ring, bi)) { + err = true; break; + } dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, bufsz, DMA_FROM_DEVICE); @@ -1684,6 +1776,8 @@ void rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *rx_ring, u16 cleaned_count) if (rx_ring->next_to_use != i) rnpgbe_update_rx_tail(rx_ring, i); + + return err; } static bool rnpgbe_alloc_mapped_page(struct rnpgbe_ring *rx_ring, @@ -1756,6 +1850,9 @@ static bool rnpgbe_is_non_eop(struct rnpgbe_ring *rx_ring, rx_ring->rx_buffer_info[ntc].skb = skb; rx_ring->rx_stats.non_eop_descs++; + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + return true; } @@ -1764,8 +1861,11 @@ rnpgbe_get_rx_buffer(struct rnpgbe_ring *rx_ring, union rnpgbe_rx_desc *rx_desc, struct sk_buff **skb, const 
unsigned int size) { struct rnpgbe_rx_buffer *rx_buffer; + int time = 0; + u16 *data; rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + data = page_address(rx_buffer->page) + rx_buffer->page_offset; prefetchw(rx_buffer->page); *skb = rx_buffer->skb; @@ -1773,10 +1873,17 @@ rnpgbe_get_rx_buffer(struct rnpgbe_ring *rx_ring, union rnpgbe_rx_desc *rx_desc, page_address(rx_buffer->page) + rx_buffer->page_offset, rx_desc->wb.len); +try_sync: /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, size, DMA_FROM_DEVICE); + if ((*data == CHECK_DATA) && time < 4) { + time++; + udelay(5); + rx_ring->rx_stats.rx_resync++; + goto try_sync; + } /* skip_sync: */ rx_buffer->pagecnt_bias--; @@ -1934,6 +2041,7 @@ static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector, struct rnpgbe_adapter *adapter = q_vector->adapter; u16 cleaned_count = rnpgbe_desc_unused_rx(rx_ring); struct xdp_buff xdp; + bool fail_alloc = false; xdp.data = NULL; xdp.data_end = NULL; @@ -1946,7 +2054,8 @@ static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector, /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= RNP_RX_BUFFER_WRITE) { - rnpgbe_alloc_rx_buffers(rx_ring, cleaned_count); + fail_alloc = rnpgbe_alloc_rx_buffers(rx_ring, cleaned_count) || + fail_alloc; cleaned_count = 0; } rx_desc = RNP_RX_DESC(rx_ring, rx_ring->next_to_clean); @@ -2022,6 +2131,7 @@ static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector, rx_buffer->pagecnt_bias++; break; } + if (module_enable_ptp && adapter->ptp_rx_en && adapter->flags2 & RNP_FLAG2_PTP_ENABLED) rnpgbe_ptp_get_rx_hwstamp(adapter, rx_desc, skb); @@ -2041,6 +2151,9 @@ static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector, total_rx_bytes += skb->len; /* populate checksum, timestamp, VLAN, and protocol */ rnpgbe_process_skb_fields(rx_ring, rx_desc, skb); + /* we should clean it since we used all info in it */ 
+ rx_desc->wb.cmd = 0; + rnpgbe_rx_skb(q_vector, skb); /* update budget accounting */ total_rx_packets++; @@ -2063,7 +2176,7 @@ static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector, if (total_rx_packets >= budget) rx_ring->rx_stats.poll_again_count++; - return total_rx_packets; + return fail_alloc ? budget : total_rx_packets; } /** @@ -2173,16 +2286,35 @@ static void rnpgbe_write_eitr_rx(struct rnpgbe_q_vector *q_vector) struct rnpgbe_adapter *adapter = q_vector->adapter; struct rnpgbe_hw *hw = &adapter->hw; u32 new_itr_rx = q_vector->rx.itr; + u32 old_itr_rx = q_vector->rx.itr; struct rnpgbe_ring *ring; - if (new_itr_rx == q_vector->itr_rx) + new_itr_rx = new_itr_rx * hw->usecstocount; + /* if we are in auto mode write to hw */ + if (adapter->priv_flags & RNP_PRIV_FLAG_RX_COALESCE) return; - q_vector->itr_rx = new_itr_rx; - new_itr_rx = new_itr_rx * hw->usecstocount; - if (!(adapter->priv_flags & RNP_PRIV_FLAG_RX_COALESCE)) { - rnpgbe_for_each_ring(ring, q_vector->rx) - ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, new_itr_rx); + rnpgbe_for_each_ring(ring, q_vector->rx) { + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, + new_itr_rx); + if (ring->ring_flags & RNP_RING_LOWER_ITR) { + /* if we are already in this mode skip */ + if (q_vector->itr_rx == RNP_LOWEREST_ITR) + continue; + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + 1); + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, + RNP_LOWEREST_ITR); + q_vector->itr_rx = RNP_LOWEREST_ITR; + } else { + if (new_itr_rx == q_vector->itr_rx) + continue; + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, + new_itr_rx); + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + adapter->rx_frames); + q_vector->itr_rx = old_itr_rx; + } } } @@ -2200,7 +2332,8 @@ static inline void rnpgbe_irq_enable_queues(struct rnpgbe_adapter *adapter, rnpgbe_for_each_ring(ring, q_vector->rx) { rnpgbe_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK | TX_INT_MASK)); - ring_wr32(ring, RNP_DMA_INT_TRIG, TX_INT_MASK | RX_INT_MASK); + 
ring_wr32(ring, RNP_DMA_INT_TRIG, + MASK_VALID | TX_INT_MASK | RX_INT_MASK); } } @@ -2209,7 +2342,8 @@ static inline void rnpgbe_irq_disable_queues(struct rnpgbe_q_vector *q_vector) struct rnpgbe_ring *ring; rnpgbe_for_each_ring(ring, q_vector->tx) { - ring_wr32(ring, RNP_DMA_INT_TRIG, ~(TX_INT_MASK | RX_INT_MASK)); + ring_wr32(ring, RNP_DMA_INT_TRIG, + MASK_VALID | (~(TX_INT_MASK | RX_INT_MASK))); rnpgbe_wr_reg(ring->dma_int_mask, (RX_INT_MASK | TX_INT_MASK)); } } @@ -2309,11 +2443,6 @@ static void rnpgbe_update_ring_itr_rx(struct rnpgbe_q_vector *q_vector) else new_val = avg_wire_size / 2; - new_val = new_val / 4; - - if (packets < 3) - new_val = RNP_LOWEREST_ITR; - if (new_val < RNP_LOWEREST_ITR) new_val = RNP_LOWEREST_ITR; @@ -2362,11 +2491,9 @@ int rnpgbe_poll(struct napi_struct *napi, int budget) rnpgbe_for_each_ring(ring, q_vector->rx) { int cleaned = 0; - /* this ring is waitting to reset rx_len*/ - /* avoid to deal this ring until reset done */ - if (likely(!(ring->ring_flags & RNP_RING_FLAG_DO_RESET_RX_LEN))) - cleaned = rnpgbe_clean_rx_irq(q_vector, ring, - per_ring_budget); + + cleaned = rnpgbe_clean_rx_irq(q_vector, ring, + per_ring_budget); work_done += cleaned; if (cleaned >= per_ring_budget) clean_complete = false; @@ -2561,28 +2688,21 @@ static int rnpgbe_request_irq(struct rnpgbe_adapter *adapter) if (adapter->flags & RNP_FLAG_MSIX_ENABLED) { pr_info("msix mode is used\n"); err = rnpgbe_request_msix_irqs(adapter); - if (hw->hw_type == rnpgbe_hw_n500 || - hw->hw_type == rnpgbe_hw_n210) - wr32(hw, RNP500_LEGANCY_ENABLE, 0); + hw_wr32(hw, RNPGBE_LEGANCY_ENABLE, 0); } else if (adapter->flags & RNP_FLAG_MSI_ENABLED) { /* in this case one for all */ pr_info("msi mode is used\n"); err = request_irq(adapter->pdev->irq, rnpgbe_intr, 0, adapter->netdev->name, adapter); adapter->hw.mbx.other_irq_enabled = true; - if (hw->hw_type == rnpgbe_hw_n500 || - hw->hw_type == rnpgbe_hw_n210) - wr32(hw, RNP500_LEGANCY_ENABLE, 0); + hw_wr32(hw, RNPGBE_LEGANCY_ENABLE, 
0); } else { pr_info("legacy mode is used\n"); err = request_irq(adapter->pdev->irq, rnpgbe_intr, IRQF_SHARED, adapter->netdev->name, adapter); adapter->hw.mbx.other_irq_enabled = true; - if (hw->hw_type == rnpgbe_hw_n500 || - hw->hw_type == rnpgbe_hw_n210) { - wr32(hw, RNP500_LEGANCY_ENABLE, 1); - wr32(hw, RNP500_LEGANCY_TIME, 0x200); - } + hw_wr32(hw, RNPGBE_LEGANCY_ENABLE, 1); + hw_wr32(hw, RNPGBE_LEGANCY_TIME, 0x200); } if (err) @@ -2604,7 +2724,7 @@ static void rnpgbe_free_irq(struct rnpgbe_adapter *adapter) } else { free_irq(adapter->pdev->irq, adapter); adapter->hw.mbx.other_irq_enabled = false; - wr32(hw, RNP500_LEGANCY_ENABLE, 0); + hw_wr32(hw, RNPGBE_LEGANCY_ENABLE, 0); } } @@ -2651,12 +2771,16 @@ static int rnpgbe_tx_maxrate_own(struct rnpgbe_adapter *adapter, if (!maxrate) { return rnpgbe_setup_tx_maxrate(tx_ring, - 0, adapter->hw.usecstocount * 1000000); + 0, adapter->hw.usecstocount * 100000); } /* we need turn it to bytes/s */ - real_rate = ((u64)maxrate * 1024 * 1024) / 8; + if (real_rate < 50) + real_rate = ((u64)maxrate * 1000 * 85) >> 3; + else + real_rate = ((u64)maxrate * 1000 * 94) >> 3; + rnpgbe_setup_tx_maxrate(tx_ring, real_rate, - adapter->hw.usecstocount * 1000000); + adapter->hw.usecstocount * 100000); return 0; } @@ -2900,8 +3024,8 @@ static void rnpgbe_configure_rx(struct rnpgbe_adapter *adapter) rnpgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); if (adapter->num_rx_queues > 0) { - wr32(hw, RNP_ETH_DEFAULT_RX_RING, - adapter->rx_ring[0]->rnpgbe_queue_idx); + hw_wr32(hw, RNP_ETH_DEFAULT_RX_RING, + adapter->rx_ring[0]->rnpgbe_queue_idx); } /* enable all receives */ @@ -2959,6 +3083,10 @@ static int rnpgbe_vlan_rx_add_vid(struct net_device *netdev, set_bit(vid, adapter->active_vlans); } } + + if (vid == 0) + veb_setup = false; + /* only ctags setup veb if in sriov and not stags */ if (hw->ops.set_vlan_filter) { hw->ops.set_vlan_filter(hw, vid, true, @@ -3003,10 +3131,11 @@ static int rnpgbe_vlan_rx_kill_vid(struct net_device *netdev, 
if (!test_bit(vid, adapter->active_vlans_stags)) true_remove = 1; } + if ((adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) && + vid != adapter->stags_vid) + true_remove = 0; + if (true_remove) { - if ((adapter->flags2 & - RNP_FLAG2_VLAN_STAGS_ENABLED) && - vid != adapter->stags_vid) hw->ops.set_vlan_filter(hw, vid, false, veb_setup); } @@ -3181,15 +3310,24 @@ static void rnpgbe_fdir_filter_restore(struct rnpgbe_adapter *adapter) /* setup ntuple */ hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, fdir_node) { - rnpgbe_fdir_write_perfect_filter(adapter->fdir_mode, - hw, &filter->filter, filter->hw_idx, - (filter->action == RNP_FDIR_DROP_QUEUE) ? - RNP_FDIR_DROP_QUEUE : - adapter->rx_ring[filter->action] - ->rnpgbe_queue_idx, - (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_PRIO) ? - true : - false); + int queue = adapter->rx_ring[filter->action]->rnpgbe_queue_idx; + bool prio = !!(adapter->priv_flags & RNP_PRIV_FLAG_REMAP_PRIO); + bool drop = !!(filter->action == RNP_FDIR_DROP_QUEUE); + + if (!filter->vf_num && filter->action != ACTION_TO_MPE) { + rnpgbe_fdir_write_perfect_filter(adapter->fdir_mode, + hw, &filter->filter, + filter->hw_idx, + drop ? RNP_FDIR_DROP_QUEUE : + queue, prio ? true : false); + } else { + rnpgbe_fdir_write_perfect_filter(adapter->fdir_mode, + hw, &filter->filter, + filter->hw_idx, + drop ? RNP_FDIR_DROP_QUEUE : + filter->action, + prio ? 
true : false); + } } spin_unlock(&adapter->fdir_perfect_lock); @@ -3234,6 +3372,12 @@ static void rnpgbe_configure(struct rnpgbe_adapter *adapter) rnpgbe_init_rss_key(adapter); rnpgbe_init_rss_table(adapter); + /* should setup rx hash status */ + if (adapter->netdev->features & (NETIF_F_RXHASH)) + hw->ops.set_rx_hash(hw, true, sriov_flag); + else + hw->ops.set_rx_hash(hw, false, sriov_flag); + if (!(adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE)) { if (adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE) rnpgbe_fdir_filter_restore(adapter); @@ -3292,22 +3436,12 @@ static void rnpgbe_up_complete(struct rnpgbe_adapter *adapter) adapter->link_check_timeout = jiffies; mod_timer(&adapter->service_timer, jiffies); - /* Set PF Reset Done bit so PF/VF Mail Ops can work */ - /* maybe differ in n500 */ hw->link = 0; hw->ops.set_mbx_link_event(hw, 1); hw->ops.set_mbx_ifup(hw, 1); - - if (hw->ncsi_en && - (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { - if (hw->ops.driver_status) { - hw->ops.driver_status(hw, false, - rnpgbe_driver_force_control_mac); - } - } } -static void rnpgbe_reinit_locked(struct rnpgbe_adapter *adapter) +void rnpgbe_reinit_locked(struct rnpgbe_adapter *adapter) { WARN_ON(in_interrupt()); /* put off any impending NetWatchDogTimeout */ @@ -3520,25 +3654,19 @@ static void print_status(struct rnpgbe_adapter *adapter) void rnpgbe_down(struct rnpgbe_adapter *adapter) { + bool is_pci_dead = pci_channel_offline(adapter->pdev); struct net_device *netdev = adapter->netdev; struct rnpgbe_hw *hw = &adapter->hw; - int i; - int free_tx_ealay = 0; + bool is_pci_online = !is_pci_dead; int err = 0; + int i; /* signal that we are down to the interrupt handler */ set_bit(__RNP_DOWN, &adapter->state); /* close rx only when no ncsi and no sriov on */ - if (!hw->ncsi_en && (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))) + if (!hw->ncsi_en) hw->ops.set_mac_rx(hw, false); - if (hw->ncsi_en && - (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { - if 
(hw->ops.driver_status) - hw->ops.driver_status(hw, true, - rnpgbe_driver_force_control_mac); - } - hw->ops.set_mbx_link_event(hw, 0); hw->ops.set_mbx_ifup(hw, 0); rnpgbe_setup_eee_mode(adapter, false); @@ -3553,73 +3681,52 @@ void rnpgbe_down(struct rnpgbe_adapter *adapter) netif_carrier_off(netdev); usleep_range(5000, 10000); /* if we have tx desc to clean */ - for (i = 0; i < adapter->num_tx_queues; i++) { + for (i = 0; i < adapter->num_tx_queues && is_pci_online; i++) { struct rnpgbe_ring *tx_ring = adapter->tx_ring[i]; + int head, tail; + int timeout = 0; - { - int head, tail; - int timeout = 0; + /* should first check if have packets to send */ + if (tx_ring->next_to_use == tx_ring->next_to_clean) + continue; - free_tx_ealay = 1; - /* should first check if have packets to send */ - if (tx_ring->next_to_use == tx_ring->next_to_clean) - continue; + head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + tail = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); - head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); - tail = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); - - while (head != tail) { - usleep_range(30000, 50000); - - head = ring_rd32(tx_ring, - RNP_DMA_REG_TX_DESC_BUF_HEAD); - tail = ring_rd32(tx_ring, - RNP_DMA_REG_TX_DESC_BUF_TAIL); - timeout++; - if (timeout >= 100 && timeout < 101) { - e_info(drv, - "wait ring %d tx done timeout %x %x\n", - i, head, tail); - adapter->priv_flags |= - RNP_PRIV_FLGA_TEST_TX_HANG; - print_status(adapter); - err = 1; - } - if (timeout >= 200) { - e_info(drv, - "200 wait tx done timeout %x %x\n", - head, tail); - print_status(adapter); - break; - } - } - } - } + while (head != tail) { + usleep_range(30000, 50000); - { - int time = 0; + head = ring_rd32(tx_ring, + RNP_DMA_REG_TX_DESC_BUF_HEAD); + tail = ring_rd32(tx_ring, + RNP_DMA_REG_TX_DESC_BUF_TAIL); + timeout++; - while (test_bit(__RNP_SERVICE_CHECK, &adapter->state)) { - usleep_range(100, 200); - time++; - if (time > 100) + if (timeout >= 100 && timeout 
< 101) { + e_info(drv, "wait ring %d tx done timeout %x %x\n", + i, head, tail); + adapter->priv_flags |= RNP_PRIV_FLGA_TEST_TX_HANG; + print_status(adapter); + err = 1; + } + + if (timeout >= 200) { + e_info(drv, "200 wait tx done timeout %x %x\n", + head, tail); + print_status(adapter); break; + } } } - if (free_tx_ealay) - rnpgbe_clean_all_tx_rings(adapter); - + rnpgbe_clean_all_tx_rings(adapter); usleep_range(2000, 5000); - rnpgbe_irq_disable(adapter); - usleep_range(5000, 10000); - netif_tx_disable(netdev); /* disable all enabled rx queues */ - for (i = 0; i < adapter->num_rx_queues; i++) + for (i = 0; i < adapter->num_rx_queues && is_pci_online; i++) rnpgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); /* call carrier off first to avoid false dev_watchdog timeouts */ @@ -3634,24 +3741,24 @@ void rnpgbe_down(struct rnpgbe_adapter *adapter) rnpgbe_ping_all_vfs(adapter); /* disable transmits in the hardware now that interrupts are off */ - for (i = 0; i < adapter->num_tx_queues; i++) { + for (i = 0; i < adapter->num_tx_queues && is_pci_online; i++) { struct rnpgbe_ring *tx_ring = adapter->tx_ring[i]; if (!err) ring_wr32(tx_ring, RNP_DMA_TX_START, 0); } if (!err) { - if (!pci_channel_offline(adapter->pdev)) + if (!pci_channel_offline(adapter->pdev)) { if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) rnpgbe_reset(adapter); + else if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE)) + rnpgbe_reset(adapter); + } } /* power down the optics for n10 SFP+ fiber */ if (hw->ops.disable_tx_laser) hw->ops.disable_tx_laser(hw); - if (!free_tx_ealay) - rnpgbe_clean_all_tx_rings(adapter); - rnpgbe_clean_all_rx_rings(adapter); } @@ -3760,7 +3867,7 @@ static int rnpgbe_sw_init(struct rnpgbe_adapter *adapter) adapter->tx_lpi_timer = RNP_DEFAULT_TWT_LS; /* itr sw setup here */ - adapter->sample_interval = 10; + adapter->sample_interval = 1; adapter->adaptive_rx_coal = 1; adapter->adaptive_tx_coal = 1; adapter->auto_rx_coal = 0; @@ -4095,11 +4202,11 @@ static int 
rnpgbe_tx_maxrate(struct net_device *netdev, int queue_index, rnpgbe_dbg("%s: queue:%d maxrate:%d\n", __func__, queue_index, maxrate); if (!maxrate) return rnpgbe_setup_tx_maxrate(tx_ring, - 0, adapter->hw.usecstocount * 1000000); + 0, adapter->hw.usecstocount * 100000); /* we need turn it to bytes/s */ - real_rate = ((u64)maxrate * 1024 * 1024) / 8; + real_rate = ((u64)maxrate * 1000 * 94) >> 3; rnpgbe_setup_tx_maxrate(tx_ring, real_rate, - adapter->hw.usecstocount * 1000000); + adapter->hw.usecstocount * 100000); return 0; } @@ -4243,14 +4350,15 @@ static int rnpgbe_resume(struct device *dev) if (!err) err = register_mbx_irq(adapter); - if (hw->ops.driver_status) - hw->ops.driver_status(hw, false, rnpgbe_driver_suspuse); - - if (hw->ops.driver_status) - hw->ops.driver_status(hw, true, rnpgbe_driver_insmod); + hw->ops.driver_status(hw, false, rnpgbe_driver_suspuse); + hw->ops.driver_status(hw, true, rnpgbe_driver_insmod); rnpgbe_reset(adapter); + /* we should setup link in default */ + hw->ops.setup_link(hw, DEFAULT_ADV, 1, 0, 0); + hw->advertised_link = DEFAULT_ADV; + if (!err) { if (netif_running(netdev)) { err = rnpgbe_open(netdev); @@ -4286,10 +4394,12 @@ static int rnpgbe_freeze(struct device *dev) if (netif_running(netdev)) { rnpgbe_down(adapter); rnpgbe_free_irq(adapter); + rnpgbe_free_all_tx_resources(adapter); + rnpgbe_free_all_rx_resources(adapter); } remove_mbx_irq(adapter); - rnpgbe_reset_interrupt_capability(adapter); + rnpgbe_clear_interrupt_scheme(adapter); rtnl_unlock(); return 0; @@ -4303,18 +4413,17 @@ static int rnpgbe_thaw(struct device *dev) { struct rnpgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); struct net_device *netdev = adapter->netdev; + u32 err; - rnpgbe_set_interrupt_capability(adapter); - register_mbx_irq(adapter); - - if (netif_running(netdev)) { - u32 err = rnpgbe_request_irq(adapter); + rtnl_lock(); + err = rnpgbe_init_interrupt_scheme(adapter); - if (err) - return err; + if (netif_running(netdev)) + 
rnpgbe_open(netdev); - rnpgbe_up(adapter); - } + rtnl_unlock(); + if (err) + return err; netif_device_attach(netdev); @@ -4341,8 +4450,8 @@ static int __rnpgbe_shutdown_suspuse(struct pci_dev *pdev, bool *enable_wake) } rtnl_unlock(); - - if (hw->ops.driver_status) + /* if we open wol or ncsi_en, we must send this to hw */ + if (hw->ncsi_en || adapter->wol) hw->ops.driver_status(hw, true, rnpgbe_driver_suspuse); remove_mbx_irq(adapter); @@ -4393,7 +4502,8 @@ static int __rnpgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) rtnl_unlock(); - if (hw->ops.driver_status) + /* only send mbx if ncsi or wol on */ + if (hw->ncsi_en || adapter->wol) hw->ops.driver_status(hw, false, rnpgbe_driver_insmod); remove_mbx_irq(adapter); @@ -4463,46 +4573,59 @@ static void rnpgbe_shutdown(struct pci_dev *pdev) void rnpgbe_update_stats(struct rnpgbe_adapter *adapter) { struct net_device_stats *net_stats = &adapter->netdev->stats; - struct rnpgbe_hw *hw = &adapter->hw; struct rnpgbe_hw_stats *hw_stats = &adapter->hw_stats; - int i; + struct rnpgbe_hw *hw = &adapter->hw; struct rnpgbe_ring *ring; u64 hw_csum_rx_error = 0; u64 hw_csum_rx_good = 0; + u64 vlan_strip_cnt = 0; + u64 vlan_add_cnt = 0; + u64 tx_packets = 0; + u64 rx_packets = 0; + u64 tx_bytes = 0; + u64 rx_bytes = 0; + int i; + + if (!adapter->link_up) + return; - net_stats->tx_packets = 0; - net_stats->tx_bytes = 0; - net_stats->rx_packets = 0; - net_stats->rx_bytes = 0; net_stats->rx_dropped = 0; - net_stats->rx_errors = 0; - hw_stats->vlan_strip_cnt = 0; - hw_stats->vlan_add_cnt = 0; + net_stats->tx_dropped = 0; if (test_bit(__RNP_DOWN, &adapter->state) || test_bit(__RNP_RESETTING, &adapter->state)) return; + if (pci_channel_offline(adapter->pdev)) + return; + for (i = 0; i < adapter->num_q_vectors; i++) { rnpgbe_for_each_ring(ring, adapter->q_vector[i]->rx) { hw_csum_rx_error += ring->rx_stats.csum_err; hw_csum_rx_good += ring->rx_stats.csum_good; - hw_stats->vlan_strip_cnt += ring->rx_stats.vlan_remove; - 
net_stats->rx_packets += ring->stats.packets; - net_stats->rx_bytes += ring->stats.bytes; + vlan_strip_cnt += ring->rx_stats.vlan_remove; + rx_packets += ring->stats.packets; + rx_bytes += ring->stats.bytes; } rnpgbe_for_each_ring(ring, adapter->q_vector[i]->tx) { - hw_stats->vlan_add_cnt += ring->tx_stats.vlan_add; - net_stats->tx_packets += ring->stats.packets; - net_stats->tx_bytes += ring->stats.bytes; + vlan_add_cnt += ring->tx_stats.vlan_add; + tx_packets += ring->stats.packets; + tx_bytes += ring->stats.bytes; } } - net_stats->rx_errors += hw_csum_rx_error; hw->ops.update_hw_status(hw, hw_stats, net_stats); adapter->hw_csum_rx_error = hw_csum_rx_error; adapter->hw_csum_rx_good = hw_csum_rx_good; - net_stats->rx_errors = hw_csum_rx_error; + net_stats->rx_errors += hw_csum_rx_error; + /* update to net_stats */ + net_stats->tx_packets = tx_packets; + net_stats->tx_bytes = tx_bytes; + hw_stats->vlan_add_cnt = vlan_add_cnt; + net_stats->rx_packets = rx_packets; + net_stats->rx_bytes = rx_bytes; + hw_stats->vlan_strip_cnt = vlan_strip_cnt; + } /** @@ -4647,15 +4770,13 @@ static int rnpgbe_phy_init_eee(struct rnpgbe_adapter *adapter) if (!(adapter->local_eee & adapter->partner_eee)) return -EIO; - if (hw->hw_type == rnpgbe_hw_n500 || hw->hw_type == rnpgbe_hw_n210) { - /* n500 only support eee in 100/1000 full */ - if (!hw->duplex) - return -EIO; + /* only support eee in 100/1000 full */ + if (!hw->duplex) + return -EIO; - if (adapter->speed != RNP_LINK_SPEED_100_FULL && - adapter->speed != RNP_LINK_SPEED_1GB_FULL) - return -EIO; - } + if (adapter->speed != RNP_LINK_SPEED_100_FULL && + adapter->speed != RNP_LINK_SPEED_1GB_FULL) + return -EIO; /* if in sriov mode cannot open eee */ if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) @@ -4789,11 +4910,7 @@ void rnpgbe_service_timer(struct timer_list *t) unsigned long next_event_offset; bool ready = true; - /* poll faster when waiting for link */ - if (adapter->flags & RNP_FLAG_NEED_LINK_UPDATE) - next_event_offset = HZ / 10; 
- else - next_event_offset = HZ * 2; + next_event_offset = HZ; /* Reset the timer */ if (!test_bit(__RNP_REMOVE, &adapter->state)) mod_timer(&adapter->service_timer, next_event_offset + jiffies); @@ -4861,19 +4978,75 @@ static void rnpgbe_reset_subtask(struct rnpgbe_adapter *adapter) rtnl_unlock(); } -static void rnpgbe_rx_len_reset_subtask(struct rnpgbe_adapter *adapter) +static void rnpgbe_auto_itr_moderation(struct rnpgbe_adapter *adapter) { int i; struct rnpgbe_ring *rx_ring; + u64 period = (u64)(jiffies - adapter->last_moder_jiffies); + + if (adapter->priv_flags & RNP_PRIV_FLAG_RX_COALESCE) + return; + + if (!adapter->adaptive_rx_coal || + period < adapter->sample_interval * HZ) + return; + + adapter->last_moder_jiffies = jiffies; + + /* it is time to check moderation */ + for (i = 0; i < adapter->num_rx_queues; i++) { + u64 x, rate; + u64 rx_packets, packets, rx_pkt_diff; - for (i = 0; i < adapter->num_tx_queues; i++) { rx_ring = adapter->rx_ring[i]; - if (unlikely(rx_ring->ring_flags & - RNP_RING_FLAG_DO_RESET_RX_LEN)) { - dbg("[%s] Rx-ring %d count reset\n", - adapter->netdev->name, rx_ring->rnpgbe_queue_idx); - rnpgbe_rx_ring_reinit(adapter, rx_ring); - rx_ring->ring_flags &= (~RNP_RING_FLAG_DO_RESET_RX_LEN); + rx_packets = READ_ONCE(rx_ring->stats.packets); + rx_pkt_diff = rx_packets - + adapter->last_moder_packets[rx_ring->queue_index]; + packets = rx_pkt_diff; + x = packets * HZ; + do_div(x, period); + rate = x; + + if (packets != 0 && rate < 20000) + rx_ring->ring_flags |= RNP_RING_LOWER_ITR; + else if (packets != 0) + rx_ring->ring_flags &= (~RNP_RING_LOWER_ITR); + + /* write back new count */ + adapter->last_moder_packets[rx_ring->queue_index] = rx_packets; + } +} + +static void rnpgbe_monitor_msix_vector(struct rnpgbe_adapter *adapter) +{ + struct rnpgbe_hw *hw = &adapter->hw; + struct device *dev = &hw->pdev->dev; + int v_base = hw->msix_vector_base; + struct rnpgbe_q_vector *q_vector; + u32 val, i; + int v_idx; + + if 
(adapter->hw.mbx.other_irq_enabled) { + val = hw_rd32(hw, v_base + 0xc); + if (val & BIT(0)) { + dev_info(dev, "mbx mask detected\n"); + hw_wr32(hw, v_base + 0xc, 0); + } + } + + if (test_bit(__RNP_DOWN, &adapter->state)) + return; + /* only check if msix mode */ + if (!(adapter->flags & RNP_FLAG_MSIX_ENABLED)) + return; + for ((i) = 0; (i) < (adapter)->num_q_vectors; (i)++) { + q_vector = adapter->q_vector[i]; + v_idx = q_vector->v_idx; + + val = hw_rd32(hw, v_base + 0xc + 0x10 * v_idx); + if (val & BIT(0)) { + dev_info(dev, "vidx %d mask detected\n", v_idx); + hw_wr32(hw, v_base + 0xc + 0x10 * v_idx, 0); } } } @@ -4890,7 +5063,8 @@ void rnpgbe_service_task(struct work_struct *work) rnpgbe_reset_subtask(adapter); rnpgbe_reset_pf_subtask(adapter); rnpgbe_watchdog_subtask(adapter); - rnpgbe_rx_len_reset_subtask(adapter); + rnpgbe_auto_itr_moderation(adapter); + rnpgbe_monitor_msix_vector(adapter); rnpgbe_service_event_complete(adapter); } @@ -5657,14 +5831,17 @@ static int rnpgbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, static int rnpgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) { struct rnpgbe_adapter *adapter = netdev_priv(netdev); - /* ptp 1588 used this */ switch (cmd) { case SIOCGHWTSTAMP: + if (!adapter->hwts_ops) + return -EINVAL; if (module_enable_ptp) return rnpgbe_ptp_get_ts_config(adapter, req); break; case SIOCSHWTSTAMP: + if (!adapter->hwts_ops) + return -EINVAL; if (module_enable_ptp) return rnpgbe_ptp_set_ts_config(adapter, req); break; @@ -5759,19 +5936,13 @@ int rnpgbe_setup_tc(struct net_device *dev, u8 tc) struct rnpgbe_hw *hw = &adapter->hw; int ret = 0; - if (hw->hw_type != rnpgbe_hw_n10 && (tc)) + if (tc) return -EINVAL; - if (hw->ops.driver_status) + if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { hw->ops.driver_status(hw, true, - rnpgbe_driver_force_control_mac); - - /* Hardware supports up to 8 traffic classes */ - if (tc > RNP_MAX_TCS_NUM || tc == 1) - return -EINVAL; - /* we cannot support 
tc with sriov mode */ - if ((tc) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED)) - return -EINVAL; + rnpgbe_driver_force_control_phy); + } /* Hardware has to reinitialize queues and interrupts to * match packet buffer alignment. Unfortunately, the @@ -5787,16 +5958,6 @@ int rnpgbe_setup_tc(struct net_device *dev, u8 tc) adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC); remove_mbx_irq(adapter); rnpgbe_clear_interrupt_scheme(adapter); - adapter->num_tc = tc; - - if (tc) { - netdev_set_num_tc(dev, tc); - adapter->flags |= RNP_FLAG_DCB_ENABLED; - } else { - netdev_reset_tc(dev); - adapter->flags &= ~RNP_FLAG_DCB_ENABLED; - } - rnpgbe_init_interrupt_scheme(adapter); register_mbx_irq(adapter); /* rss table must reset */ @@ -5805,10 +5966,10 @@ int rnpgbe_setup_tc(struct net_device *dev, u8 tc) if (netif_running(dev)) ret = rnpgbe_open(dev); - if (hw->ops.driver_status) + if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { hw->ops.driver_status(hw, false, - rnpgbe_driver_force_control_mac); - + rnpgbe_driver_force_control_phy); + } clear_bit(__RNP_RESETTING, &adapter->state); return ret; } @@ -6012,12 +6173,12 @@ rnpgbe_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, mode = nla_get_u16(attr); if (mode == BRIDGE_MODE_VEPA) { adapter->flags2 &= ~RNP_FLAG2_BRIDGE_MODE_VEB; - wr32(hw, RNP_DMA_CONFIG, - rd32(hw, RNP_DMA_CONFIG) | DMA_VEB_BYPASS); + hw_wr32(hw, RNP_DMA_CONFIG, + hw_rd32(hw, RNP_DMA_CONFIG) | DMA_VEB_BYPASS); } else if (mode == BRIDGE_MODE_VEB) { adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB; - wr32(hw, RNP_DMA_CONFIG, - rd32(hw, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS)); + hw_wr32(hw, RNP_DMA_CONFIG, + hw_rd32(hw, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS)); } else { return -EINVAL; @@ -6058,6 +6219,12 @@ static netdev_features_t rnpgbe_features_check(struct sk_buff *skb, netdev_features_t features) { unsigned int network_hdr_len, mac_hdr_len; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, 
l4_offset; /* Make certain the headers can be described by a context descriptor */ mac_hdr_len = skb_network_header(skb) - skb->data; @@ -6077,10 +6244,24 @@ static netdev_features_t rnpgbe_features_check(struct sk_buff *skb, if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) features &= ~NETIF_F_TSO; + if (skb_is_gso(skb) && (skb->len > (0xffff - skb_shinfo(skb)->gso_size))) { + l4.hdr = skb_transport_header(skb); + l4_offset = l4.hdr - skb->data; + paylen = skb->len - l4_offset; + + if (skb->csum_offset == offsetof(struct tcphdr, check)) + paylen -= l4.tcp->doff * 4; + else + paylen -= 8; + /* close tso if skb more than total- mtu, but not Integer multiple of mss */ + if (paylen % skb_shinfo(skb)->gso_size) + features &= ~NETIF_F_TSO; + } + return features; } -const struct net_device_ops rnpgbe_netdev_ops = { +static const struct net_device_ops rnpgbe_netdev_ops = { .ndo_open = rnpgbe_open, .ndo_stop = rnpgbe_close, .ndo_start_xmit = rnpgbe_xmit_frame, @@ -6133,6 +6314,7 @@ int rnpgbe_wol_supported(struct rnpgbe_adapter *adapter, u16 device_id) switch (device_id) { case PCI_DEVICE_ID_N210: + case PCI_DEVICE_ID_N210L: case PCI_DEVICE_ID_N500_QUAD_PORT: case PCI_DEVICE_ID_N500_DUAL_PORT: is_wol_supported = 1; @@ -6152,8 +6334,9 @@ static void remove_mbx_irq(struct rnpgbe_adapter *adapter) if (adapter->flags & RNP_FLAG_MSIX_ENABLED) { adapter->hw.mbx.ops.configure(&adapter->hw, adapter->msix_entries[0].entry, false); + if (!adapter->hw.mbx.other_irq_enabled) + return; free_irq(adapter->msix_entries[0].vector, adapter); - adapter->hw.mbx.other_irq_enabled = false; } } @@ -6187,12 +6370,17 @@ static int register_mbx_irq(struct rnpgbe_adapter *adapter) return err; } -static int rnpgbe_rm_adpater(struct rnpgbe_adapter *adapter) +static int rnpgbe_rm_adapter(struct rnpgbe_adapter *adapter) { struct net_device *netdev; struct rnpgbe_hw *hw = &adapter->hw; netdev = adapter->netdev; + /* if not register, just return */ + if (adapter->flags2 & 
RNP_FLAG2_NO_NET_REG) { + free_netdev(netdev); + return 0; + } pr_info("= remove adapter:%s =\n", netdev->name); rnpgbe_dbg_adapter_exit(adapter); netif_carrier_off(netdev); @@ -6220,16 +6408,7 @@ static int rnpgbe_rm_adpater(struct rnpgbe_adapter *adapter) adapter->netdev = NULL; - /* we should set this to false when remove driver */ - if (hw->ncsi_en && - (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { - if (hw->ops.driver_status) - hw->ops.driver_status(hw, false, - rnpgbe_driver_force_control_mac); - } - - if (hw->ops.driver_status) - hw->ops.driver_status(hw, false, rnpgbe_driver_insmod); + hw->ops.driver_status(hw, false, rnpgbe_driver_insmod); remove_mbx_irq(adapter); rnpgbe_clear_interrupt_scheme(adapter); @@ -6246,7 +6425,64 @@ static int rnpgbe_rm_adpater(struct rnpgbe_adapter *adapter) return 0; } -static int rnpgbe_init_firmware(struct rnpgbe_hw *hw, struct file *file, +static int rnpgbe_check_fw_from_flash(struct rnpgbe_hw *hw, + const u8 *data, + int len) +{ + u32 device_id; + int ret = 0; + u32 chip_data; + u32 crc32 = 0xffffffff; + u32 crc32_goal; + struct crc32_info *info = (struct crc32_info *)(data + CRC_OFFSET); + + if (*((u32 *)(data)) != 0xa55aa55a) + return -EINVAL; + + if (info->magic == CRC32_MAGIC) { + crc32_goal = info->crc32; + info->crc32 = 0; + info->magic = 0; + crc32 = crc32_le(crc32, data, len); + if (crc32 != crc32_goal) + return -1; + info->magic = CRC32_MAGIC; + info->crc32 = crc32_goal; + } + + device_id = *((u16 *)data + 30); + + /* if no device_id no check */ + if (device_id == 0 || device_id == 0xffff) + return 0; + +#define CHIP_OFFSET (0x1f014 + 0x1000) + /* we should get hw_type from sfc-flash */ + chip_data = ioread32(hw->hw_addr + CHIP_OFFSET); + if (chip_data == 0x11111111) + hw->hw_type = rnpgbe_hw_n210; + else if (chip_data == 0x0) + hw->hw_type = rnpgbe_hw_n210L; + else + return 0; + + switch (hw->hw_type) { + case rnpgbe_hw_n210: + if (device_id != 0x8208) + ret = 1; + break; + case rnpgbe_hw_n210L: + if 
(device_id != 0x820a) + ret = 1; + break; + default: + ret = 1; + } + + return ret; +} + +static int rnpgbe_init_firmware(struct rnpgbe_hw *hw, const u8 *data, int file_size) { struct device *dev = &hw->pdev->dev; @@ -6271,7 +6507,13 @@ static int rnpgbe_init_firmware(struct rnpgbe_hw *hw, struct file *file, } old_pos = pos; - get_len = kernel_read(file, buf, rd_len, &pos); + if (end_pos - pos < rd_len) + get_len = end_pos - pos; + else + get_len = rd_len; + + memcpy(buf, data + pos, get_len); + if ((get_len < rd_len && ((old_pos + get_len) != end_pos)) || get_len < 0) { dev_err(dev, "read err, pos 0x%x, get len %d", @@ -6290,17 +6532,35 @@ static int rnpgbe_init_firmware(struct rnpgbe_hw *hw, struct file *file, pos += get_len; } + /* write first 4k header */ + pos = 0; + old_pos = pos; + get_len = rd_len; + + memcpy(buf, data + pos, get_len); + + for (iter = 0; iter < get_len; iter += 4) { + old_data = *((u32 *)(buf + iter)); + fw_off = (u32)old_pos + iter + 0x1000; + iowrite32(old_data, (hw->hw_addr + fw_off)); + } + dev_info(dev, "Checking for firmware. 
Wait a moment, please."); /* check */ pos = 0x0; while (pos < end_pos) { - if (pos >= 0x1f000 && pos < 0x20000) { + if ((pos >= 0x1f000 && pos < 0x20000) || pos == 0) { pos += rd_len; continue; } old_pos = pos; - get_len = kernel_read(file, buf, rd_len, &pos); + if (end_pos - pos < rd_len) + get_len = end_pos - pos; + else + get_len = rd_len; + + memcpy(buf, data + pos, get_len); if ((get_len < rd_len && ((old_pos + get_len) != end_pos)) || get_len < 0) { @@ -6331,7 +6591,7 @@ static int rnpgbe_init_firmware(struct rnpgbe_hw *hw, struct file *file, return err; } -static int rnpgbe_add_adpater(struct pci_dev *pdev, struct rnpgbe_info *ii, +static int rnpgbe_add_adapter(struct pci_dev *pdev, struct rnpgbe_info *ii, struct rnpgbe_adapter **padapter) { int i, err = 0; @@ -6387,29 +6647,29 @@ static int rnpgbe_add_adpater(struct pci_dev *pdev, struct rnpgbe_info *ii, return -EIO; } - pr_info("[bar%d]:%p %llx len=%d MB\n", RNP_NIC_BAR_N500, + pr_info("[bar%d]:%p %llx len=%d kB\n", RNP_NIC_BAR_N500, hw_addr, (unsigned long long)pci_resource_start(pdev, RNP_NIC_BAR_N500), - (int)pci_resource_len(pdev, RNP_NIC_BAR_N500) / 1024 / - 1024); + (int)pci_resource_len(pdev, RNP_NIC_BAR_N500) / 1024); /* get dma version */ dma_version = rnpgbe_rd_reg(hw_addr); hw->hw_addr = hw_addr; /* setup msix base */ hw->ring_msix_base = hw->hw_addr + 0x28700; hw->pfvfnum_system = PF_NUM_N500(rnpgbe_get_fuc(pdev)); - nic_version = rd32(hw, RNP500_TOP_NIC_VERSION); + nic_version = hw_rd32(hw, RNPGBE_TOP_NIC_VERSION); adapter->irq_mode = irq_mode_msix; adapter->flags |= RNP_FLAG_MSI_CAPABLE | RNP_FLAG_MSIX_CAPABLE | RNP_FLAG_LEGACY_CAPABLE; break; case rnpgbe_hw_n210: + case rnpgbe_hw_n210L: #define RNP_NIC_BAR_N210 2 if (pci_resource_len(pdev, 0) == 0x100000) { - char *path = "/lib/firmware/n210_driver_update.bin"; - struct file *file = NULL; - int file_size = 0; + char *filename = "n210_driver_update.bin"; + const struct firmware *fw; + int rc; hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 
pci_resource_len(pdev, 0)); @@ -6419,34 +6679,40 @@ static int rnpgbe_add_adpater(struct pci_dev *pdev, struct rnpgbe_info *ii, return -EIO; } - file = filp_open(path, O_RDONLY, 0); - if (IS_ERR(file)) { - dev_err(&pdev->dev, - "filp_open(%s) failed with err %ld", - path, PTR_ERR(file)); - err = PTR_ERR(file); - return err; + rc = request_firmware(&fw, filename, &pdev->dev); + if (rc != 0) { + dev_err(&pdev->dev, "Error %d requesting firmware file: %s\n", rc, + filename); + return rc; + } + + if (rnpgbe_check_fw_from_flash(hw, fw->data, fw->size)) { + release_firmware(fw); + dev_info(&pdev->dev, "firmware type error\n"); + return -EIO; } - file_size = file->f_inode->i_size; - dev_info(&pdev->dev, "%s size %u", path, file_size); - err = rsp_hal_sfc_flash_erase(hw, file_size); + rsp_hal_sfc_write_protect(hw, 0); + + err = rsp_hal_sfc_flash_erase(hw, fw->size); if (err) { + release_firmware(fw); dev_err(&pdev->dev, "erase flash failed!"); - fput(file); return err; } - err = rnpgbe_init_firmware(hw, file, file_size); + err = rnpgbe_init_firmware(hw, fw->data, fw->size); if (err) { + release_firmware(fw); dev_err(&pdev->dev, "init firmware failed!"); - fput(file); return err; } dev_info(&pdev->dev, "init firmware successfully."); dev_info(&pdev->dev, "Please reboot. 
Then you can use the device."); - fput(file); + release_firmware(fw); + iounmap(hw->hw_addr); + adapter->flags2 |= RNP_FLAG2_NO_NET_REG; return 0; } hw_addr = ioremap(pci_resource_start(pdev, RNP_NIC_BAR_N210), @@ -6469,7 +6735,7 @@ static int rnpgbe_add_adpater(struct pci_dev *pdev, struct rnpgbe_info *ii, /* setup msix base */ hw->ring_msix_base = hw->hw_addr + 0x29000; hw->pfvfnum_system = PF_NUM_N500(rnpgbe_get_fuc(pdev)); - nic_version = rd32(hw, RNP500_TOP_NIC_VERSION); + nic_version = hw_rd32(hw, RNPGBE_TOP_NIC_VERSION); adapter->irq_mode = irq_mode_msix; adapter->flags |= RNP_FLAG_MSI_CAPABLE | RNP_FLAG_MSIX_CAPABLE | RNP_FLAG_LEGACY_CAPABLE; @@ -6495,9 +6761,9 @@ static int rnpgbe_add_adpater(struct pci_dev *pdev, struct rnpgbe_info *ii, memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); if (dma_version >= 0x20210111) { rnpgbe_mbx_link_event_enable(hw, 0); - if (hw->hw_type == rnpgbe_hw_n10 || - hw->hw_type == rnpgbe_hw_n400) - rnpgbe_mbx_force_speed(hw, 0); + /* call driver status */ + hw->ops.driver_status(hw, true, rnpgbe_driver_insmod); + if (rnpgbe_mbx_get_capability(hw, ii)) { dev_err(&pdev->dev, "rnpgbe_mbx_get_capability failed!\n"); @@ -6547,6 +6813,11 @@ static int rnpgbe_add_adpater(struct pci_dev *pdev, struct rnpgbe_info *ii, adapter->wol = hw->wol; } } + + if (hw->force_en) + adapter->priv_flags |= RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE; + hw->driver_version = driver_version; + hw->default_rx_queue = 0; pr_info("%s %s: dma version:0x%x, nic version:0x%x, pfvfnum:0x%x\n", adapter->name, pci_name(pdev), hw->dma_version, nic_version, @@ -6576,9 +6847,9 @@ static int rnpgbe_add_adpater(struct pci_dev *pdev, struct rnpgbe_info *ii, e_dev_err("HW Init failed: %d\n", err); goto err_sw_init; } - /* call driver status */ - if (hw->ops.driver_status) - hw->ops.driver_status(hw, true, rnpgbe_driver_insmod); + + hw->ops.setup_link(hw, DEFAULT_ADV, 1, 0, 0); + hw->advertised_link = DEFAULT_ADV; /* should force phy down first */ 
hw->ops.set_mbx_link_event(hw, 0); @@ -6619,10 +6890,6 @@ static int rnpgbe_add_adpater(struct pci_dev *pdev, struct rnpgbe_info *ii, netdev->hw_features |= netdev->features; - if (hw->feature_flags & RNP_NET_FEATURE_VLAN_FILTER) - netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; - if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) - netdev->hw_features |= NETIF_F_HW_VLAN_STAG_FILTER; if (hw->feature_flags & RNP_NET_FEATURE_VLAN_OFFLOAD) { netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; if (!hw->ncsi_en) @@ -6732,6 +6999,7 @@ static int rnpgbe_add_adpater(struct pci_dev *pdev, struct rnpgbe_info *ii, } /* reset the hardware with the new settings */ err = hw->ops.start_hw(hw); + set_bit(__RNP_DOWN, &adapter->state); strscpy(netdev->name, "eth%d", sizeof(netdev->name)); err = register_netdev(netdev); @@ -6828,7 +7096,7 @@ static int rnpgbe_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); pci_save_state(pdev); - err = rnpgbe_add_adpater(pdev, ii, &adapter); + err = rnpgbe_add_adapter(pdev, ii, &adapter); if (err) goto err_regions; @@ -6854,6 +7122,9 @@ static void rnpgbe_remove(struct pci_dev *pdev) { struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev); + if (pci_channel_offline(pdev)) + netif_device_detach(adapter->netdev); + #if IS_ENABLED(CONFIG_PCI_IOV) /* Only disable SR-IOV on unload if the user specified the now * deprecated max_vfs module parameter. 
@@ -6861,7 +7132,7 @@ static void rnpgbe_remove(struct pci_dev *pdev) rnpgbe_disable_sriov(adapter); #endif - rnpgbe_rm_adpater(adapter); + rnpgbe_rm_adapter(adapter); pci_release_mem_regions(pdev); pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c index e55629db1161..3bb67852dcab 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c @@ -249,6 +249,10 @@ static s32 rnpgbe_write_posted_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size, struct rnpgbe_mbx_info *mbx = &hw->mbx; s32 ret_val = RNP_ERR_MBX; + /* if pcie off, nothing todo */ + if (pci_channel_offline(hw->pdev)) + return -EIO; + /* exit if either we can't write or there isn't a defined timeout */ if (!mbx->ops.write || !mbx->timeout) goto out; @@ -277,6 +281,10 @@ static s32 rnpgbe_check_for_msg_pf(struct rnpgbe_hw *hw, enum MBX_ID mbx_id) u16 hw_req_count = 0; struct rnpgbe_mbx_info *mbx = &hw->mbx; + /* if pcie off, nothing todo */ + if (pci_channel_offline(hw->pdev)) + return -EIO; + if (mbx_id == MBX_CM3CPU) { hw_req_count = rnpgbe_mbx_get_req(hw, CPU2PF_COUNTER(mbx)); if (hw_req_count != 0 && hw_req_count != hw->mbx.cpu_req) { @@ -304,12 +312,19 @@ static s32 rnpgbe_check_for_msg_pf(struct rnpgbe_hw *hw, enum MBX_ID mbx_id) **/ static s32 rnpgbe_check_for_ack_pf(struct rnpgbe_hw *hw, enum MBX_ID mbx_id) { - s32 ret_val = RNP_ERR_MBX; struct rnpgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + u16 hw_cpu_ack = 0; + + /* if pcie off, nothing todo */ + if (pci_channel_offline(hw->pdev)) + return -EIO; if (mbx_id == MBX_CM3CPU) { - if (rnpgbe_mbx_get_ack(hw, CPU2PF_COUNTER(mbx)) != - hw->mbx.cpu_ack) { + hw_cpu_ack = rnpgbe_mbx_get_ack(hw, CPU2PF_COUNTER(mbx)); + + if (hw_cpu_ack != 0 && + hw_cpu_ack != hw->mbx.cpu_ack) { ret_val = 0; hw->mbx.stats.acks++; } @@ -336,7 +351,7 @@ static s32 rnpgbe_obtain_mbx_lock_pf(struct rnpgbe_hw *hw, enum MBX_ID mbx_id) 
int try_cnt = 5000; struct rnpgbe_mbx_info *mbx = &hw->mbx; u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : - PF2VF_MBOX_CTRL(mbx, mbx_id); + PF2VF_MBOX_CTRL(mbx, mbx_id); while (try_cnt-- > 0) { /* Take ownership of the buffer */ @@ -374,6 +389,10 @@ static s32 rnpgbe_write_mbx_pf(struct rnpgbe_hw *hw, u32 *msg, u16 size, u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : PF2VF_MBOX_CTRL(mbx, mbx_id); + /* if pcie off, nothing todo */ + if (pci_channel_offline(hw->pdev)) + return -EIO; + if (size > RNP_VFMAILBOX_SIZE) return -EINVAL; @@ -429,6 +448,11 @@ static s32 rnpgbe_read_mbx_pf(struct rnpgbe_hw *hw, u32 *msg, u16 size, PF_VF_SHM_DATA(mbx, mbx_id); u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : PF2VF_MBOX_CTRL(mbx, mbx_id); + + /* if pcie off, nothing todo */ + if (pci_channel_offline(hw->pdev)) + return -EIO; + if (size > RNP_VFMAILBOX_SIZE) return -EINVAL; /* lock the mailbox to prevent pf/vf race condition */ @@ -484,11 +508,11 @@ static void rnpgbe_mbx_reset(struct rnpgbe_hw *hw) mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0); if (PF_VF_MBOX_MASK_LO(mbx)) - wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0); + mbx_wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0); if (PF_VF_MBOX_MASK_HI(mbx)) - wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0); + mbx_wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0); - wr32(hw, CPU_PF_MBOX_MASK(mbx), 0); + mbx_wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xffff0000); } static int rnpgbe_mbx_configure_pf(struct rnpgbe_hw *hw, int nr_vec, @@ -498,6 +522,10 @@ static int rnpgbe_mbx_configure_pf(struct rnpgbe_hw *hw, int nr_vec, u32 v; struct rnpgbe_mbx_info *mbx = &hw->mbx; + /* if pcie off, nothing todo */ + if (pci_channel_offline(hw->pdev)) + return -EIO; + if (enable) { for (idx = 0; idx < hw->max_vfs; idx++) { v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx)); @@ -510,7 +538,7 @@ static int rnpgbe_mbx_configure_pf(struct rnpgbe_hw *hw, int nr_vec, v = mbx_rd32(hw, CPU2PF_COUNTER(mbx)); hw->mbx.cpu_req = v & 0xffff; hw->mbx.cpu_ack = (v >> 16) & 0xffff; - 
/* release pf->cm3 buffer lock */ + /* release pf->cm3 buffer lock */ mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0); /* allow VF to PF MBX IRQ */ @@ -518,29 +546,28 @@ static int rnpgbe_mbx_configure_pf(struct rnpgbe_hw *hw, int nr_vec, mbx_wr32(hw, VF2PF_MBOX_VEC(mbx, idx), nr_vec); if (PF_VF_MBOX_MASK_LO(mbx)) - wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0); + mbx_wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0); if (PF_VF_MBOX_MASK_HI(mbx)) - wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0); + mbx_wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0); /* bind cm3cpu mbx to irq */ - wr32(hw, CPU2PF_MBOX_VEC(mbx), nr_vec); - wr32(hw, CPU_PF_MBOX_MASK(mbx), 0); - + mbx_wr32(hw, CPU2PF_MBOX_VEC(mbx), nr_vec); + mbx_wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xffff0000); } else { if (PF_VF_MBOX_MASK_LO(mbx)) - wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0xffffffff); + mbx_wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0xffffffff); if (PF_VF_MBOX_MASK_HI(mbx)) - wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0xffffffff); + mbx_wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0xffffffff); /* disable CM3CPU to PF MBX IRQ */ - wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xffffffff); + mbx_wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xfffffffe); /* reset vf->pf status/ctrl */ for (idx = 0; idx < hw->max_vfs; idx++) mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0); mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0); - wr32(hw, RNP_DMA_DUMY, 0); + mbx_wr32(hw, RNP_DMA_DUMY, 0); } return 0; @@ -581,7 +608,7 @@ s32 rnpgbe_init_mbx_params_pf(struct rnpgbe_hw *hw) return 0; } -struct rnpgbe_mbx_operations mbx_ops_generic = { +struct rnpgbe_mbx_operations rnpgbe_mbx_ops_generic = { .init_params = rnpgbe_init_mbx_params_pf, .read = rnpgbe_read_mbx_pf, .write = rnpgbe_write_mbx_pf, diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h index 4465b0b02d96..13d124ca9995 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h @@ -185,7 +185,7 @@ s32 rnpgbe_check_for_rst(struct rnpgbe_hw *hw, enum MBX_ID); s32 
rnpgbe_init_mbx_params_pf(struct rnpgbe_hw *hw); -extern struct rnpgbe_mbx_operations mbx_ops_generic; +extern struct rnpgbe_mbx_operations rnpgbe_mbx_ops_generic; int rnpgbe_fw_get_macaddr(struct rnpgbe_hw *hw, int pfvfnum, u8 *mac_addr, int lane); diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c index dbe69805f849..641f327d218d 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c @@ -32,6 +32,10 @@ static int rnpgbe_mbx_write_posted_locked(struct rnpgbe_hw *hw, int err = 0; int retry = 3; + /* if pcie off, nothing todo */ + if (pci_channel_offline(hw->pdev)) + return -EIO; + if (mutex_lock_interruptible(&hw->mbx.lock)) { rnpgbe_err("[%s] get mbx lock failed opcode:0x%x\n", __func__, req->opcode); @@ -62,12 +66,12 @@ static int rnpgbe_mbx_write_posted_locked(struct rnpgbe_hw *hw, static void rnpgbe_link_stat_mark_reset(struct rnpgbe_hw *hw) { - wr32(hw, RNP_DMA_DUMY, 0xa0000000); + mbx_wr32(hw, RNP_DMA_DUMY, 0xa0000000); } static void rnpgbe_link_stat_mark_disable(struct rnpgbe_hw *hw) { - wr32(hw, RNP_DMA_DUMY, 0); + mbx_wr32(hw, RNP_DMA_DUMY, 0); } static int rnpgbe_mbx_fw_post_req(struct rnpgbe_hw *hw, struct mbx_fw_cmd_req *req, @@ -76,6 +80,10 @@ static int rnpgbe_mbx_fw_post_req(struct rnpgbe_hw *hw, struct mbx_fw_cmd_req *r int err = 0; struct rnpgbe_adapter *adpt = hw->back; + /* if pcie off, nothing todo */ + if (pci_channel_offline(hw->pdev)) + return -EIO; + cookie->errcode = 0; cookie->done = 0; init_waitqueue_head(&cookie->wait); @@ -99,12 +107,9 @@ static int rnpgbe_mbx_fw_post_req(struct rnpgbe_hw *hw, struct mbx_fw_cmd_req *r } if (cookie->timeout_jiffes != 0) { -retry: - err = wait_event_interruptible_timeout(cookie->wait, - cookie->done == 1, - cookie->timeout_jiffes); - if (err == -ERESTARTSYS) - goto retry; + err = wait_event_timeout(cookie->wait, + cookie->done == 1, + cookie->timeout_jiffes); if (err == 0) { 
rnpgbe_err("[%s] pfvfnum:0x%x timeout err:%d opcode:%x\n", adpt->name, hw->pfvfnum, err, @@ -136,6 +141,10 @@ static int rnpgbe_fw_send_cmd_wait(struct rnpgbe_hw *hw, struct mbx_fw_cmd_req * return -EINVAL; } + /* if pcie off, nothing todo */ + if (pci_channel_offline(hw->pdev)) + return -EIO; + if (mutex_lock_interruptible(&hw->mbx.lock)) { rnpgbe_err("[%s] get mbx lock failed opcode:0x%x\n", __func__, req->opcode); @@ -247,22 +256,6 @@ int rnpgbe_mbx_get_lane_stat(struct rnpgbe_hw *hw) hw->advertised_link = st->advertised_link; hw->tp_mdx = st->tp_mdx; - if (hw->hw_type == rnpgbe_hw_n10 || hw->hw_type == rnpgbe_hw_n400) { - if (hw->fw_version >= 0x00050000) { - hw->sfp_connector = st->sfp_connector; - hw->duplex = st->duplex; - adpt->an = st->autoneg; - } else { - hw->sfp_connector = 0xff; - hw->duplex = 1; - adpt->an = st->an; - } - if (hw->fw_version <= 0x00050000) { - hw->supported_link |= RNP_LINK_SPEED_10GB_FULL | - RNP_LINK_SPEED_1GB_FULL; - } - } - rnpgbe_logd(LOG_MBX_LINK_STAT, "%s:pma_type:0x%x phy_type:0x%x,linkup:%d duplex:%d auton:%d ", adpt->name, st->pma_type, st->phy_type, st->linkup, @@ -657,7 +650,7 @@ int rnpgbe_mbx_force_speed(struct rnpgbe_hw *hw, int speed) { int cmd = 0x01150000; - if (hw->force_10g_1g_speed_ablity == 0) + if (hw->force_10g_1g_speed_ability == 0) return -EINVAL; if (speed == RNP_LINK_SPEED_10GB_FULL) { @@ -762,7 +755,7 @@ int rnpgbe_mbx_get_dump(struct rnpgbe_hw *hw, int flags, u32 *data_out, return err ? 
-err : 0; } -int rnp500_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin, +int rnpgbe_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin, int bytes) { struct rnpgbe_mbx_info *mbx = &hw->mbx; @@ -782,6 +775,33 @@ int rnp500_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin, return -ENOMEM; } + /* if bytes more than ram_size, we update header at last */ + if (bytes > ram_size) { + offset += ram_size; + + if (hw->hw_type == rnpgbe_hw_n210 || + hw->hw_type == rnpgbe_hw_n210L) { + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + for (i = 0; i < ram_size; i = i + 4) + rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i, + 0xffffffff); + + build_fw_update_n500_req(&req, cookie, partition, 0); + if (hw->mbx.other_irq_enabled) { + err = rnpgbe_mbx_fw_post_req(hw, &req, cookie); + } else { + int old_mbx_timeout = hw->mbx.timeout; + + hw->mbx.timeout = (20 * 1000 * 1000) / + hw->mbx.usec_delay; + err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply); + hw->mbx.timeout = old_mbx_timeout; + } + } + } + while (offset < bytes) { memset(&req, 0, sizeof(req)); memset(&reply, 0, sizeof(reply)); @@ -807,59 +827,31 @@ int rnp500_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin, goto out; offset += ram_size; } + /* we write header at last */ + if (bytes > ram_size) { + offset = 0; + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); -out: - return err ? 
-err : 0; -} - -int rnpgbe_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin, - int bytes) -{ - int err; - struct mbx_req_cookie *cookie = NULL; - struct mbx_fw_cmd_req req; - struct mbx_fw_cmd_reply reply; - void *dma_buf = NULL; - dma_addr_t dma_phy; - u64 address; - - cookie = mbx_cookie_zalloc(0); - if (!cookie) { - dev_err(&hw->pdev->dev, "%s: no memory:%d!", __func__, 0); - return -ENOMEM; - } - - memset(&req, 0, sizeof(req)); - memset(&reply, 0, sizeof(reply)); - - dma_buf = dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, GFP_ATOMIC); - if (!dma_buf) { - err = -ENOMEM; - goto quit; - } + for (i = 0; i < ram_size; i = i + 4) + rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i, + *(msg + offset / 4 + i / 4)); - memcpy(dma_buf, fw_bin, bytes); - address = dma_phy; - build_fw_update_req(&req, cookie, partition, address & 0xffffffff, - (address >> 32) & 0xffffffff, bytes); - if (hw->mbx.other_irq_enabled) { - err = rnpgbe_mbx_fw_post_req(hw, &req, cookie); - } else { - int old_mbx_timeout = hw->mbx.timeout; + build_fw_update_n500_req(&req, cookie, partition, offset); + if (hw->mbx.other_irq_enabled) { + err = rnpgbe_mbx_fw_post_req(hw, &req, cookie); + } else { + int old_mbx_timeout = hw->mbx.timeout; - hw->mbx.timeout = - (20 * 1000 * 1000) / hw->mbx.usec_delay; - err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply); - hw->mbx.timeout = old_mbx_timeout; + hw->mbx.timeout = (20 * 1000 * 1000) / + hw->mbx.usec_delay; + err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply); + hw->mbx.timeout = old_mbx_timeout; + } } -quit: - if (dma_buf) - dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); - - kfree(cookie); - - return (err) ? -EIO : 0; +out: + return err ? 
-err : 0; } int rnpgbe_mbx_link_event_enable(struct rnpgbe_hw *hw, int enable) @@ -872,19 +864,19 @@ int rnpgbe_mbx_link_event_enable(struct rnpgbe_hw *hw, int enable) memset(&reply, 0, sizeof(reply)); if (enable) - wr32(hw, RNP_DMA_DUMY, 0xa0000000); + mbx_wr32(hw, RNP_DMA_DUMY, 0xa0000000); build_link_set_event_mask(&req, BIT(EVT_LINK_UP), (enable & 1) << EVT_LINK_UP, &req); err = rnpgbe_mbx_write_posted_locked(hw, &req); if (!enable) - wr32(hw, RNP_DMA_DUMY, 0); + mbx_wr32(hw, RNP_DMA_DUMY, 0); return err; } -int rnpgbe_fw_get_capablity(struct rnpgbe_hw *hw, struct phy_abilities *abil) +int rnpgbe_fw_get_capability(struct rnpgbe_hw *hw, struct phy_abilities *abil) { int err; struct mbx_fw_cmd_req req; @@ -959,7 +951,7 @@ int rnpgbe_mbx_ifinsmod(struct rnpgbe_hw *hw, int status) memset(&req, 0, sizeof(req)); memset(&reply, 0, sizeof(reply)); - build_ifinsmod(&req, hw->nr_lane, status); + build_ifinsmod(&req, hw->driver_version, status); if (mutex_lock_interruptible(&hw->mbx.lock)) return -EAGAIN; @@ -1139,59 +1131,67 @@ int rnpgbe_mbx_phy_eee_set(struct rnpgbe_hw *hw, u32 tx_lpi_timer, int rnpgbe_mbx_get_capability(struct rnpgbe_hw *hw, struct rnpgbe_info *info) { int err; - struct phy_abilities ablity; + struct phy_abilities ability; int try_cnt = 3; - memset(&ablity, 0, sizeof(ablity)); + memset(&ability, 0, sizeof(ability)); rnpgbe_link_stat_mark_disable(hw); while (try_cnt--) { - err = rnpgbe_fw_get_capablity(hw, &ablity); + err = rnpgbe_fw_get_capability(hw, &ability); if (err == 0 && info) { - hw->lane_mask = ablity.lane_mask & 0xf; - info->mac = to_mac_type(&ablity); + hw->lane_mask = ability.lane_mask & 0xf; + info->mac = to_mac_type(&ability); info->adapter_cnt = hweight_long(hw->lane_mask); - hw->sfc_boot = (ablity.nic_mode & 0x1) ? 1 : 0; - hw->pxe_en = (ablity.nic_mode & 0x2) ? 1 : 0; - hw->ncsi_en = (ablity.nic_mode & 0x4) ? 1 : 0; - hw->pfvfnum = ablity.pfnum; - hw->speed = ablity.speed; + hw->sfc_boot = (ability.nic_mode & 0x1) ? 
1 : 0; + hw->pxe_en = (ability.nic_mode & 0x2) ? 1 : 0; + hw->ncsi_en = (ability.nic_mode & 0x4) ? 1 : 0; + hw->pfvfnum = ability.pfnum; + hw->speed = ability.speed; hw->nr_lane = 0; - hw->fw_version = ablity.fw_version; + hw->fw_version = ability.fw_version; hw->mac_type = info->mac; - hw->phy_type = ablity.phy_type; - hw->axi_mhz = ablity.axi_mhz; - hw->port_ids = ablity.port_ids; - hw->bd_uid = ablity.bd_uid; - hw->phy_id = ablity.phy_id; + hw->phy_type = ability.phy_type; + hw->axi_mhz = ability.axi_mhz; + hw->port_ids = ability.port_ids; + hw->bd_uid = ability.bd_uid; + hw->phy_id = ability.phy_id; if (hw->fw_version >= 0x00050201 && - ablity.speed == SPEED_10000) { + ability.speed == SPEED_10000) { hw->force_speed_stat = FORCE_SPEED_STAT_DISABLED; - hw->force_10g_1g_speed_ablity = 1; + hw->force_10g_1g_speed_ability = 1; } if (hw->fw_version >= 0x0001012C) { /* this version can get wol_en from hw */ - hw->wol = ablity.wol_status & 0xff; - hw->wol_en = ablity.wol_status & 0x100; + hw->wol = ability.wol_status & 0xff; + hw->wol_en = ability.wol_status & 0x100; } else { /* other version only pf0 or ncsi can wol */ - hw->wol = ablity.wol_status & 0x1; - if (hw->ncsi_en || !ablity.pfnum) + hw->wol = ability.wol_status & 0xff; + if (hw->ncsi_en || !ability.pfnum) hw->wol_en = 1; } + /* 0.1.5.0 can get force status from fw */ + if (hw->fw_version >= 0x00010500) { + hw->force_en = ability.e.force_down_en; + hw->force_cap = 1; + } + /* 0.1.6.0 can get trim valid from hw */ + if (hw->fw_version >= 0x00010600) + hw->trim_valid = (ability.nic_mode & 0x8) ? 
1 : 0; pr_info("%s: nic-mode:%d mac:%d adpt_cnt:%d lane_mask:0x%x,", __func__, hw->mode, info->mac, info->adapter_cnt, hw->lane_mask); pr_info("phy_type 0x%x, pfvfnum:0x%x, fw-version:0x%08x\n, axi:%d Mhz,", hw->phy_type, hw->pfvfnum, - ablity.fw_version, ablity.axi_mhz); - pr_info("port_id:%d bd_uid:0x%08x 0x%x ex-ablity:0x%x fs:%d speed:%d ", - ablity.port_id[0], hw->bd_uid, - ablity.phy_id, ablity.ext_ablity, - hw->force_10g_1g_speed_ablity, - ablity.speed); + ability.fw_version, ability.axi_mhz); + pr_info("port_id:%d bd_uid:0x%08x 0x%x ex-ability:0x%x fs:%d speed:%d ", + ability.port_id[0], hw->bd_uid, + ability.phy_id, ability.ext_ability, + hw->force_10g_1g_speed_ability, + ability.speed); if (info->adapter_cnt != 0) return 0; } @@ -1246,50 +1246,47 @@ enum speed_enum { void rnpgbe_link_stat_mark(struct rnpgbe_hw *hw, int up) { + struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back; u32 v; - v = rd32(hw, RNP_DMA_DUMY); - if (hw->hw_type == rnpgbe_hw_n10 || hw->hw_type == rnpgbe_hw_n400) { - v &= ~(0xffff0000); - v |= 0xa5a40000; - if (up) - v |= BIT(0); - else - v &= ~BIT(0); - - } else if ((hw->hw_type == rnpgbe_hw_n500) || - (hw->hw_type == rnpgbe_hw_n210)) { - v &= ~(0x0f000f11); - v |= 0xa0000000; - if (up) { - v |= BIT(0); - switch (hw->speed) { - case 10: - v |= (speed_10 << 8); - break; - case 100: - v |= (speed_100 << 8); - break; - case 1000: - v |= (speed_1000 << 8); - break; - case 10000: - v |= (speed_10000 << 8); - break; - case 25000: - v |= (speed_25000 << 8); - break; - case 40000: - v |= (speed_40000 << 8); - break; - } - v |= (hw->duplex << 4); - v |= (hw->fc.current_mode << 24); - } else { - v &= ~BIT(0); + v = mbx_rd32(hw, RNP_DMA_DUMY); + v &= ~(0x0f000f11); + v |= 0xa0000000; + if (up) { + v |= BIT(0); + switch (hw->speed) { + case 10: + v |= (speed_10 << 8); + break; + case 100: + v |= (speed_100 << 8); + break; + case 1000: + v |= (speed_1000 << 8); + break; + case 10000: + v |= (speed_10000 << 8); + break; + case 25000: 
+ v |= (speed_25000 << 8); + break; + case 40000: + v |= (speed_40000 << 8); + break; } + v |= (hw->duplex << 4); + v |= (hw->fc.current_mode << 24); + } else { + v &= ~BIT(0); + } + /* we should update lldp_status */ + if (hw->fw_version >= 0x00010500) { + if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP) + v |= BIT(6); + else + v &= (~BIT(6)); } - wr32(hw, RNP_DMA_DUMY, v); + mbx_wr32(hw, RNP_DMA_DUMY, v); } static inline int rnpgbe_mbx_fw_req_handler(struct rnpgbe_adapter *adapter, @@ -1313,22 +1310,22 @@ static inline int rnpgbe_mbx_fw_req_handler(struct rnpgbe_adapter *adapter, else adapter->hw.link = 0; - if (hw->hw_type == rnpgbe_hw_n500 || - hw->hw_type == rnpgbe_hw_n210) { - adapter->local_eee = req->link_stat.st[0].local_eee; - adapter->partner_eee = req->link_stat.st[0].partner_eee; + adapter->local_eee = req->link_stat.st[0].local_eee; + adapter->partner_eee = req->link_stat.st[0].partner_eee; + + if (hw->fw_version >= 0x00010500) { + if (req->link_stat.st[0].lldp_status) + adapter->priv_flags |= RNP_PRIV_FLAG_LLDP; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP); } if (req->link_stat.port_st_magic == SPEED_VALID_MAGIC) { hw->speed = req->link_stat.st[0].speed; hw->duplex = req->link_stat.st[0].duplex; - /* n500 can update pause and tp */ - if (hw->hw_type == rnpgbe_hw_n500 || - hw->hw_type == rnpgbe_hw_n210) { - hw->fc.current_mode = - req->link_stat.st[0].pause; - hw->tp_mdx = req->link_stat.st[0].tp_mdx; - } + hw->fc.current_mode = + req->link_stat.st[0].pause; + hw->tp_mdx = req->link_stat.st[0].tp_mdx; switch (hw->speed) { case 10: @@ -1359,6 +1356,7 @@ static inline int rnpgbe_mbx_fw_req_handler(struct rnpgbe_adapter *adapter, adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; break; } + rnpgbe_service_event_schedule(adapter); return 0; } @@ -1382,7 +1380,7 @@ static inline int rnpgbe_mbx_fw_reply_handler(struct rnpgbe_adapter *adapter, else cookie->errcode = 0; - wake_up_interruptible(&cookie->wait); + wake_up(&cookie->wait); return 0; } diff --git 
a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h index 98721c6939ba..10c18aa2510e 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h @@ -153,18 +153,25 @@ struct phy_abilities { int wol_status; union { - int ext_ablity; + int ext_ability; struct { - int valid : 1; - int wol_en : 1; - int pci_preset_runtime_en : 1; - int smbus_en : 1; - int ncsi_en : 1; - int rpu_en : 1; - int v2 : 1; - int pxe_en : 1; - int mctp_en : 1; - }; + unsigned int valid : 1; + unsigned int wol_en : 1; + unsigned int pci_preset_runtime_en : 1; + unsigned int smbus_en : 1; + unsigned int ncsi_en : 1; + unsigned int rpu_en : 1; + unsigned int v2 : 1; + unsigned int pxe_en : 1; + unsigned int mctp_en : 1; + unsigned int yt8614 : 1; + unsigned int pci_ext_reset : 1; + unsigned int rpu_availble : 1; + unsigned int fw_lldp_ability : 1; + unsigned int lldp_enabled : 1; + unsigned int only_1g : 1; + unsigned int force_down_en: 1; + } e; }; } _PACKED_ALIGN4; @@ -218,7 +225,7 @@ struct link_stat_data { char rev1 : 1; /* 3:ignore */ char an_completed : 1; - char lp_an_ablity : 1; + char lp_an_ability : 1; char parallel_detection_fault : 1; char fec_enabled : 1; char low_power_state : 1; @@ -269,7 +276,8 @@ struct port_stat { u16 local_eee : 3; u16 partner_eee : 3; u16 tp_mdx : 2; - u16 revs : 4; + u16 lldp_status : 1; + u16 revs : 3; } __attribute__((packed)); struct phy_pause_data { @@ -574,7 +582,7 @@ struct mbx_fw_cmd_req { char nr_lane; } set_phy_reg; struct { - } get_phy_ablity; + } get_phy_ability; struct { int lane_mask; @@ -602,7 +610,7 @@ struct mbx_fw_cmd_req { unsigned int bin_phy_hi; } fw_update; }; -} _PACKED_ALIGN4; +} __packed; #define EEE_1000BT BIT(2) #define EEE_100BT BIT(1) @@ -1242,6 +1250,7 @@ enum MBX_ERR { MBX_ERR_NODEV, MBX_ERR_IO, }; -int rnpgbe_fw_get_capablity(struct rnpgbe_hw *hw, struct phy_abilities *abil); + +int rnpgbe_fw_get_capability(struct 
rnpgbe_hw *hw, struct phy_abilities *abil); #endif diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c index 46e6db81cd1c..779d7d66e521 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c @@ -116,11 +116,6 @@ static void get_systime(void __iomem *ioaddr, u64 *systime) *systime = ns; } -static void config_mac_interrupt_enable(void __iomem *ioaddr, bool on) -{ - rnpgbe_wr_reg(ioaddr + RNP_MAC_INTERRUPT_ENABLE, on); -} - static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, int add_sub, int gmac4) { @@ -164,9 +159,8 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, int add_sub, return 0; } -const struct rnpgbe_hwtimestamp mac_ptp = { +static const struct rnpgbe_hwtimestamp mac_ptp = { .config_hw_tstamping = config_hw_tstamping, - .config_mac_irq_enable = config_mac_interrupt_enable, .init_systime = init_systime, .config_sub_second_increment = config_sub_second_increment, .config_addend = config_addend, @@ -557,6 +551,7 @@ void rnpgbe_ptp_unregister(struct rnpgbe_adapter *pf) if (pf->ptp_clock) { ptp_clock_unregister(pf->ptp_clock); pf->ptp_clock = NULL; + pf->hwts_ops = NULL; pr_debug("Removed PTP HW clock successfully on %s\n", "rnpgbe_ptp"); } @@ -640,6 +635,7 @@ void rnpgbe_ptp_get_rx_hwstamp(struct rnpgbe_adapter *adapter, { u64 ns = 0; u64 tsvalueh = 0, tsvaluel = 0; + __be32 value_h, value_l; struct skb_shared_hwtstamps *hwtstamps = NULL; if (!skb || !adapter->ptp_rx_en) { @@ -649,7 +645,7 @@ void rnpgbe_ptp_get_rx_hwstamp(struct rnpgbe_adapter *adapter, return; } - if (likely(!(desc->wb.cmd & RNP_RXD_STAT_PTP))) + if (likely(!(desc->wb.cmd & cpu_to_le16(RNP_RXD_STAT_PTP)))) return; hwtstamps = skb_hwtstamps(skb); /* because of rx hwstamp store before the mac head @@ -660,14 +656,14 @@ void rnpgbe_ptp_get_rx_hwstamp(struct rnpgbe_adapter *adapter, /* low8bytes is null high8bytes is timestamp * high32bit is seconds 
low32bits is nanoseconds */ - skb_copy_from_linear_data_offset(skb, RNP_RX_TIME_RESERVE, &tsvalueh, + skb_copy_from_linear_data_offset(skb, RNP_RX_TIME_RESERVE, &value_h, RNP_RX_SEC_SIZE); skb_copy_from_linear_data_offset(skb, RNP_RX_TIME_RESERVE + RNP_RX_SEC_SIZE, - &tsvaluel, RNP_RX_NANOSEC_SIZE); + &value_l, RNP_RX_NANOSEC_SIZE); skb_pull(skb, RNP_RX_HWTS_OFFSET); - tsvalueh = ntohl(tsvalueh); - tsvaluel = ntohl(tsvaluel); + tsvalueh = ntohl(value_h); + tsvaluel = ntohl(value_l); ns = tsvaluel & RNP_RX_NSEC_MASK; ns += ((tsvalueh & RNP_RX_SEC_MASK) * 1000000000ULL); diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h index 5935084c7535..5648e61abef3 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h @@ -87,7 +87,6 @@ struct rnpgbe_hwtimestamp { #define PTP_GET_TX_HWTS_UPDATE (0) /* hardware ts can't so fake ts from the software clock */ #define DEBUG_PTP_HARD_SOFTWAY - int rnpgbe_ptp_get_ts_config(struct rnpgbe_adapter *pf, struct ifreq *ifr); int rnpgbe_ptp_set_ts_config(struct rnpgbe_adapter *pf, struct ifreq *ifr); int rnpgbe_ptp_register(struct rnpgbe_adapter *pf); diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_regs.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_regs.h index fe024e13ec8f..0f9960cf6a0c 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_regs.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_regs.h @@ -15,9 +15,8 @@ /* ------------------------------------------*/ /* ==================== RNP-DMA Global Registers ==================== */ -#define RNP10_RING_BASE (0x8000) #define RNP20_RING_BASE (0x8000) -#define RNP500_RING_BASE (0x1000) +#define RNPGBE_RING_BASE (0x1000) #define RING_OFFSET(queue_idx) (0x100 * (queue_idx)) #define RNP_DMA_VERSION (0x0000) #define RNP_DMA_CONFIG (0x0004) @@ -43,6 +42,7 @@ #define RNP_DMA_TX_READY (0x1c) #define RNP_DMA_INT_STAT (0x20) #define RNP_DMA_INT_MASK (0x24) +#define MASK_VALID 
0x30000 #define TX_INT_MASK 2 #define RX_INT_MASK 1 #define RNP_DMA_INT_CLR (0x28) @@ -102,13 +102,13 @@ #define RNP_DMA_STATS_SWITCH_TO_SWITCH (0x1a4) /* ================================================================== */ -#define RNP500_NIC_BASE (0x8000) +#define RNPGBE_NIC_BASE (0x8000) -#define RNP500_TOP_NIC_REST_N (0x8010 - RNP500_NIC_BASE) -#define RNP500_TOP_MAC_OUI (0xc004 - RNP500_NIC_BASE) -#define RNP500_TOP_MAC_SN (0xc008 - RNP500_NIC_BASE) +#define RNPGBE_TOP_NIC_REST_N (0x8010 - RNPGBE_NIC_BASE) +#define RNPGBE_TOP_MAC_OUI (0xc004 - RNPGBE_NIC_BASE) +#define RNPGBE_TOP_MAC_SN (0xc008 - RNPGBE_NIC_BASE) -#define RNP500_TOP_NIC_CONFIG (0x0004) +#define RNPGBE_TOP_NIC_CONFIG (0x0004) /* ==================== RNP-ETH Global Registers ==================== */ #define RNP_ETH_BASE (0x10000) @@ -136,100 +136,100 @@ #define DROP_ALL_THRESH (2046) #define RECEIVE_ALL_THRESH (0x270) -#define RNP500_VEB_TBL_CNTS 8 -#define RNP500_DMA_RBUF_FIFO (0x00b0) -#define RNP500_DMA_PORT_VBE_MAC_LO_TBL(port, vf) \ +#define RNPGBE_VEB_TBL_CNTS 8 +#define RNPGBE_DMA_RBUF_FIFO (0x00b0) +#define RNPGBE_DMA_PORT_VBE_MAC_LO_TBL(port, vf) \ (0x10c0 + 4 * (port) + 0x100 * (vf)) -#define RNP500_DMA_PORT_VBE_MAC_HI_TBL(port, vf) \ +#define RNPGBE_DMA_PORT_VBE_MAC_HI_TBL(port, vf) \ (0x10c4 + 4 * (port) + 0x100 * (vf)) -#define RNP500_DMA_PORT_VEB_VID_TBL(port, vf) \ +#define RNPGBE_DMA_PORT_VEB_VID_TBL(port, vf) \ (0x10C8 + 4 * (port) + 0x100 * (vf)) -#define RNP500_DMA_PORT_VEB_VF_RING_TBL(port, vf) \ +#define RNPGBE_DMA_PORT_VEB_VF_RING_TBL(port, vf) \ (0x10cc + 4 * (port) + 0x100 * (vf)) -#define RNP500_ETH_BASE (0x10000) - -#define RNP500_ETH_TUPLE5_SAQF(n) (0xc000 + 0x04 * (n)) -#define RNP500_ETH_TUPLE5_DAQF(n) (0xc400 + 0x04 * (n)) -#define RNP500_ETH_TUPLE5_SDPQF(n) (0xc800 + 0x04 * (n)) -#define RNP500_ETH_TUPLE5_FTQF(n) (0xcc00 + 0x04 * (n)) -#define RNP500_ETH_TUPLE5_POLICY(n) (0xce00 + 0x04 * (n)) - -#define RNP500_ETH_DEFAULT_RX_MIN_LEN (0x80f0) -#define 
RNP500_ETH_DEFAULT_RX_MAX_LEN (0x80f4) - -#define RNP500_ETH_VLAN_VME_REG(n) (0x8040 + 0x04 * (n)) -#define RNP500_ETH_ERR_MASK_VECTOR (0x8060) - -#define RNP500_ETH_RSS_MASK (0x3ff0001) -#define RNP500_ETH_ENABLE_RSS_ONLY (0x3f30001) -#define RNP500_ETH_RSS_CONTROL (0x92a0) -#define RNP500_MRQC_IOV_EN (0x92a0) -#define RNP500_IOV_ENABLED BIT(3) -#define RNP500_ETH_DISABLE_RSS (0) -#define RNP500_ETH_SYNQF (0x9290) -#define RNP500_ETH_SYNQF_PRIORITY (0x9294) - -#define RNP500_ETH_FCS_EN (0x804c) -#define RNP500_ETH_HIGH_WATER(n) (0x80c0 + (n) * (0x08)) -#define RNP500_ETH_LOW_WATER(n) (0x80c4 + (n) * (0x08)) -#define RNP500_ETH_WRAP_FIELD_TYPE (0x805c) -#define RNP500_ETH_TX_VLAN_CONTROL_EANBLE (0x0070) -#define RNP500_ETH_TX_VLAN_TYPE (0x0074) -#define RNP500_ETH_RX_MAC_LEN_REG (0x80e0) -#define RNP500_ETH_WHOLE_PKT_LEN_ERR_DROP (0x807c) - -#define RNP500_RAH_AV 0x80000000 -#define RNP500_ETH_RAR_RL(n) (0xa000 + 0x04 * (n)) -#define RNP500_ETH_RAR_RH(n) (0xa400 + 0x04 * (n)) - -#define RNP500_FCTRL_BPE BIT(10) -#define RNP500_FCTRL_UPE BIT(9) -#define RNP500_FCTRL_MPE BIT(8) - -#define RNP500_ETH_DMAC_FCTRL (0x9110) -#define RNP500_ETH_DMAC_MCSTCTRL (0x9114) -#define RNP500_MCSTCTRL_MULTICASE_TBL_EN BIT(4) -#define RNP500_MCSTCTRL_UNICASE_TBL_EN BIT(3) - -#define RNP500_VM_DMAC_MPSAR_RING(entry) \ +#define RNPGBE_ETH_BASE (0x10000) + +#define RNPGBE_ETH_TUPLE5_SAQF(n) (0xc000 + 0x04 * (n)) +#define RNPGBE_ETH_TUPLE5_DAQF(n) (0xc400 + 0x04 * (n)) +#define RNPGBE_ETH_TUPLE5_SDPQF(n) (0xc800 + 0x04 * (n)) +#define RNPGBE_ETH_TUPLE5_FTQF(n) (0xcc00 + 0x04 * (n)) +#define RNPGBE_ETH_TUPLE5_POLICY(n) (0xce00 + 0x04 * (n)) + +#define RNPGBE_ETH_DEFAULT_RX_MIN_LEN (0x80f0) +#define RNPGBE_ETH_DEFAULT_RX_MAX_LEN (0x80f4) + +#define RNPGBE_ETH_VLAN_VME_REG(n) (0x8040 + 0x04 * (n)) +#define RNPGBE_ETH_ERR_MASK_VECTOR (0x8060) + +#define RNPGBE_ETH_RSS_MASK (0x3ff0001) +#define RNPGBE_ETH_ENABLE_RSS_ONLY (0x3f30001) +#define RNPGBE_ETH_RSS_CONTROL (0x92a0) +#define 
RNPGBE_MRQC_IOV_EN (0x92a0) +#define RNPGBE_IOV_ENABLED BIT(3) +#define RNPGBE_ETH_DISABLE_RSS (0) +#define RNPGBE_ETH_SYNQF (0x9290) +#define RNPGBE_ETH_SYNQF_PRIORITY (0x9294) + +#define RNPGBE_ETH_FCS_EN (0x804c) +#define RNPGBE_ETH_HIGH_WATER(n) (0x80c0 + (n) * (0x08)) +#define RNPGBE_ETH_LOW_WATER(n) (0x80c4 + (n) * (0x08)) +#define RNPGBE_ETH_WRAP_FIELD_TYPE (0x805c) +#define RNPGBE_ETH_TX_VLAN_CONTROL_EANBLE (0x0070) +#define RNPGBE_ETH_TX_VLAN_TYPE (0x0074) +#define RNPGBE_ETH_RX_MAC_LEN_REG (0x80e0) +#define RNPGBE_ETH_WHOLE_PKT_LEN_ERR_DROP (0x807c) + +#define RNPGBE_RAH_AV 0x80000000 +#define RNPGBE_ETH_RAR_RL(n) (0xa000 + 0x04 * (n)) +#define RNPGBE_ETH_RAR_RH(n) (0xa400 + 0x04 * (n)) + +#define RNPGBE_FCTRL_BPE BIT(10) +#define RNPGBE_FCTRL_UPE BIT(9) +#define RNPGBE_FCTRL_MPE BIT(8) + +#define RNPGBE_ETH_DMAC_FCTRL (0x9110) +#define RNPGBE_ETH_DMAC_MCSTCTRL (0x9114) +#define RNPGBE_MCSTCTRL_MULTICASE_TBL_EN BIT(4) +#define RNPGBE_MCSTCTRL_UNICASE_TBL_EN BIT(3) + +#define RNPGBE_VM_DMAC_MPSAR_RING(entry) \ (0xb400 + (4 * (entry))) -#define RNP500_ETH_MUTICAST_HASH_TABLE(n) (0xac00 + 0x04 * (n)) +#define RNPGBE_ETH_MUTICAST_HASH_TABLE(n) (0xac00 + 0x04 * (n)) -#define RNP500_ETH_RSS_KEY (0x92d0) +#define RNPGBE_ETH_RSS_KEY (0x92d0) -#define RNP500_ETH_TC_IPH_OFFSET_TABLE(n) (0xe800 + 0x04 * (n)) +#define RNPGBE_ETH_TC_IPH_OFFSET_TABLE(n) (0xe800 + 0x04 * (n)) -#define RNP500_ETH_RSS_INDIR_TBL(n) (0xe000 + 0x04 * (n)) +#define RNPGBE_ETH_RSS_INDIR_TBL(n) (0xe000 + 0x04 * (n)) -#define RNP500_ETH_VLAN_FILTER_TABLE(n) (0xb000 + 0x04 * (n)) -#define RNP500_VFTA RNP500_ETH_VLAN_FILTER_TABLE +#define RNPGBE_ETH_VLAN_FILTER_TABLE(n) (0xb000 + 0x04 * (n)) +#define RNPGBE_VFTA RNPGBE_ETH_VLAN_FILTER_TABLE -#define RNP500_VLVF(idx) (0xb600 + 4 * (idx)) -#define RNP500_VLVF_TABLE(idx) (0xb700 + 4 * (idx)) -#define RNP500_ETH_VLAN_FILTER_ENABLE (0x9118) -#define RNP500_PRIORITY_1_MARK (0x8080) -#define RNP500_PRIORITY_1 (400) -#define RNP500_PRIORITY_0 (300) 
-#define RNP500_PRIORITY_0_MARK (0x8084) -#define RNP500_PRIORITY_EN (0x8088) +#define RNPGBE_VLVF(idx) (0xb600 + 4 * (idx)) +#define RNPGBE_VLVF_TABLE(idx) (0xb700 + 4 * (idx)) +#define RNPGBE_ETH_VLAN_FILTER_ENABLE (0x9118) +#define RNPGBE_PRIORITY_1_MARK (0x8080) +#define RNPGBE_PRIORITY_1 (400) +#define RNPGBE_PRIORITY_0 (300) +#define RNPGBE_PRIORITY_0_MARK (0x8084) +#define RNPGBE_PRIORITY_EN (0x8088) -#define RNP500_PRIORITY_EN_8023 (0x808c) +#define RNPGBE_PRIORITY_EN_8023 (0x808c) -#define RNP500_ETH_LAYER2_ETQF(n) (0x9200 + 0x04 * (n)) -#define RNP500_ETH_LAYER2_ETQS(n) (0x9240 + 0x04 * (n)) +#define RNPGBE_ETH_LAYER2_ETQF(n) (0x9200 + 0x04 * (n)) +#define RNPGBE_ETH_LAYER2_ETQS(n) (0x9240 + 0x04 * (n)) -#define RNP500_ETH_BYPASS (0x8000) -#define RNP500_ETH_ERR_MASK_VECTOR (0x8060) -#define RNP500_ETH_PRIV_DATA_CONTROL_REG (0x8068) -#define RNP500_ETH_DEFAULT_RX_RING (0x806c) +#define RNPGBE_ETH_BYPASS (0x8000) +#define RNPGBE_ETH_ERR_MASK_VECTOR (0x8060) +#define RNPGBE_ETH_PRIV_DATA_CONTROL_REG (0x8068) +#define RNPGBE_ETH_DEFAULT_RX_RING (0x806c) -#define RNP500_ETH_DOUBLE_VLAN_DROP (0x8078) +#define RNPGBE_ETH_DOUBLE_VLAN_DROP (0x8078) -#define RNP500_HOST_FILTER_EN (0x800c) -#define RNP500_BAD_PACKETS_RECEIVE_EN (0x8024) -#define RNP500_REDIR_EN (0x8030) +#define RNPGBE_HOST_FILTER_EN (0x800c) +#define RNPGBE_BAD_PACKETS_RECEIVE_EN (0x8024) +#define RNPGBE_REDIR_EN (0x8030) #define WATCHDOG_TIMER_ERROR BIT(0) #define RUN_FRAME_ERROR BIT(1) #define GAINT_FRAME_ERROR BIT(2) @@ -241,44 +241,136 @@ #define DA_FILTER_ERROR BIT(9) #define SA_FILTER_ERROR BIT(10) -#define RNP500_MAC_ERR_MASK (0x8034) -#define RNP500_ETH_SCTP_CHECKSUM_EN (0x8038) -#define RNP500_ETH_VLAN_RM_TYPE (0x8054) -#define RNP500_ETH_EXCEPT_DROP_PROC (0x0470) -#define RNP500_ETH_EMAC_PARSE_PROGFULL_THRESH (0x8098) -#define RNP500_ETH_TX_MUX_DROP (0x98) - -#define RNP500_VEB_VFMPRC(n) (0x4018 + 0x100 * (n)) -#define RNP500_VEB_VFBPRC(n) (0x401c + 0x100 * (n)) -#define 
RNP500_RX_TIMEOUT_DROP(n) (0x404c + 0x100 * (n)) -#define RNP500_STATISTIC_CRL(n) (0x4048 + 0x100 * (n)) -#define RNP500_RX_MULTI_PKT_NUM (0x8224) -#define RNP500_RX_BROAD_PKT_NUM (0x8228) -#define RNP500_RX_MAC_CUT_NUM (0x8304) -#define RNP500_RX_MAC_LCS_ERR_NUM (0x8308) -#define RNP500_RX_MAC_LEN_ERR_NUM (0X830C) -#define RNP500_RX_MAC_SLEN_ERR_NUM (0x8310) -#define RNP500_RX_MAC_GLEN_ERR_NUM (0x8314) -#define RNP500_RX_MAC_FCS_ERR_NUM (0x8318) -#define RNP500_RX_MAC_SFCS_ERR_NUM (0x831c) -#define RNP500_RX_MAC_GFCS_ERR_NUM (0x8320) - -#define RNP500_TX_MULTI_NUM (0x214) -#define RNP500_TX_BROADCAST_NUM (0x218) - -#define RNP500_RX_DROP_PKT_NUM (0X8230) -#define RNP500_RXTRANS_DROP (0x8908) -#define RNP500_RXTRANS_CUT_ERR_PKTS (0x894c) -#define RNP500_DECAP_PKT_DROP1_NUM (0X82ec) -#define RNP500_MAC_COUNT_CONTROL (0x0100) -#define RNP500_MAC_GLEN_ERR_NUM (0X01a8) -#define RNP500_RX_DEBUG(n) (0x8400 + 0x04 * (n)) -#define RNP500_ETH_HOST_L2_DROP_PKTS RNP500_RX_DEBUG(4) -#define RNP500_ETH_REDIR_INPUT_MATCH_DROP_PKTS RNP500_RX_DEBUG(5) -#define RNP500_ETH_ETYPE_DROP_PKTS RNP500_RX_DEBUG(6) -#define RNP500_ETH_TCP_SYN_DROP_PKTS RNP500_RX_DEBUG(7) -#define RNP500_ETH_REDIR_TUPLE5_DROP_PKTS RNP500_RX_DEBUG(8) - +#define RNPGBE_MAC_ERR_MASK (0x8034) +#define RNPGBE_ETH_SCTP_CHECKSUM_EN (0x8038) +#define RNPGBE_ETH_VLAN_RM_TYPE (0x8054) +#define RNPGBE_ETH_EXCEPT_DROP_PROC (0x0470) +#define RNPGBE_ETH_EMAC_PARSE_PROGFULL_THRESH (0x8098) +#define RNPGBE_ETH_TX_MUX_DROP (0x98) + +#define RNPGBE_VEB_VFMPRC(n) (0x4018 + 0x100 * (n)) +#define RNPGBE_VEB_VFBPRC(n) (0x401c + 0x100 * (n)) +#define RNPGBE_RX_TIMEOUT_DROP(n) (0x404c + 0x100 * (n)) +#define RNPGBE_STATISTIC_CRL(n) (0x4048 + 0x100 * (n)) +#define RNPGBE_RX_MULTI_PKT_NUM (0x8224) +#define RNPGBE_RX_BROAD_PKT_NUM (0x8228) +#define RNPGBE_RX_MAC_CUT_NUM (0x8304) +#define RNPGBE_RX_MAC_LCS_ERR_NUM (0x8308) +#define RNPGBE_RX_MAC_LEN_ERR_NUM (0X830C) +#define RNPGBE_RX_MAC_SLEN_ERR_NUM (0x8310) +#define 
RNPGBE_RX_MAC_GLEN_ERR_NUM (0x8314) +#define RNPGBE_RX_MAC_FCS_ERR_NUM (0x8318) +#define RNPGBE_RX_MAC_SFCS_ERR_NUM (0x831c) +#define RNPGBE_RX_MAC_GFCS_ERR_NUM (0x8320) + +#define RNPGBE_TX_MULTI_NUM (0x214) +#define RNPGBE_TX_BROADCAST_NUM (0x218) + +#define RNPGBE_RX_DROP_PKT_NUM (0X8230) +#define RNPGBE_RXTRANS_DROP (0x8908) +#define RNPGBE_RXTRANS_LCS_ERR_NUM (0x8924) +#define RNPGBE_RXTRANS_LEN_ERR_NUM (0x8928) +#define RNPGBE_RXTRANS_SLEN_ERR_NUM (0x8934) +#define RNPGBE_RXTRANS_GLEN_ERR_NUM (0x8938) +#define RNPGBE_RXTRANS_CUT_ERR_PKTS (0x894c) +#define RNPGBE_RXTRANS_EXCEPT_NUM (0x8950) +#define RNPGBE_RXTRANS_FCS_ERR_NUM (0x8954) +#define RNPGBE_DECAP_PKT_DROP1_NUM (0X82ec) +#define RNPGBE_MAC_COUNT_CONTROL (0x0100) +#define RNPGBE_MAC_GLEN_ERR_NUM (0X01a8) +#define RNPGBE_RX_DEBUG(n) (0x8400 + 0x04 * (n)) +#define RNPGBE_ETH_HOST_L2_DROP_PKTS RNPGBE_RX_DEBUG(4) +#define RNPGBE_ETH_REDIR_INPUT_MATCH_DROP_PKTS RNPGBE_RX_DEBUG(5) +#define RNPGBE_ETH_ETYPE_DROP_PKTS RNPGBE_RX_DEBUG(6) +#define RNPGBE_ETH_TCP_SYN_DROP_PKTS RNPGBE_RX_DEBUG(7) +#define RNPGBE_ETH_REDIR_TUPLE5_DROP_PKTS RNPGBE_RX_DEBUG(8) + +// tx status in hw +#define RNPGBE_ETH_3TO1_HOST (0x200) +#define RNPGBE_ETH_3TO1_SW (0x204) +#define RNPGBE_ETH_3TO1_BMC (0x208) +#define RNPGBE_ETH_3TO1_OUT (0x210) +#define RNPGBE_ETH_OUT_MULTIPLE (0x214) +#define RNPGBE_ETH_OUT_BROADCAST (0x218) +#define RNPGBE_ETH_OUT_PTP (0x21c) +#define RNPGBE_ETH_OUT_DROP (0x220) +#define RNPGBE_ETH_TX_TRANS (0x250) +#define RNPGBE_ETH_TX_TRANS_STATUS_0 (0x120) +#define RNPGBE_ETH_TX_TRANS_STATUS_1 (0x124) +#define RNPGBE_ETH_TX_TRANS_SOP (0x300) +#define RNPGBE_ETH_TX_TRANS_EOP (0x304) + +/* rx status in hw */ +/* rx trans */ +#define RNPGBE_ETH_PKTS_IN (0x8900) +#define RNPGBE_ETH_PKTS_OUT (0x8904) +#define RNPGBE_ETH_PKTS_DRIP (0x8908) +#define RNPGBE_ETH_PKTS_IN_ETH2 (0x890c) +#define RNPGBE_ETH_PKTS_IN_8023 (0x8910) +#define RNPGBE_ETH_PKTS_IN_CONTROL (0x8914) +#define RNPGBE_ETH_PKTS_IN_UDP (0x8918) +#define 
RNPGBE_ETH_PKTS_IN_TCP (0x891c) +#define RNPGBE_ETH_PKTS_IN_ICMP (0x8920) +#define RNPGBE_ETH_PKTS_IN_LCS_ERR (0x8924) +#define RNPGBE_ETH_PKTS_IN_LEN_ERR (0x8928) +#define RNPGBE_ETH_PKTS_IN_DMAC_F (0x892c) +#define RNPGBE_ETH_PKTS_IN_SMAC_F (0x8930) +#define RNPGBE_ETH_PKTS_IN_SLEN_ERR (0x8934) +#define RNPGBE_ETH_PKTS_IN_GLEN_ERR (0x8938) +#define RNPGBE_ETH_PKTS_IN_IPH_ERR (0x893c) +#define RNPGBE_ETH_PKTS_IN_PAYLOAD_ERR (0x8940) +#define RNPGBE_ETH_PKTS_IN_IPV4 (0x8944) +#define RNPGBE_ETH_PKTS_IN_IPV6 (0x8948) +#define RNPGBE_ETH_PKTS_IN_CUT_ERR (0x894c) +#define RNPGBE_ETH_PKTS_IN_EXCEPT_BYTES (0x8950) +#define RNPGBE_ETH_PKTS_IN_FCS_ERR (0x8954) +#define RNPGBE_ETH_PKTS_IN_MAC_LEN_ERR (0x8958) + +#define RNPGBE_GATHER_PKTS_IN (0x8240) +#define RNPGBE_GATHER_PKTS_OUT (0x8220) +#define RNPGBE_GATHER_PKTS_OUT_MUL (0x8224) +#define RNPGBE_GATHER_PKTS_OUT_BRO (0x8228) +#define RNPGBE_GATHER_PKTS_IN_DROP (0x8230) +#define RNPGBE_GATHER_PKTS_IN_MAC_CUT (0x8304) +#define RNPGBE_GATHER_PKTS_IN_MAC_LCS_ERR (0x8308) +#define RNPGBE_GATHER_PKTS_IN_MAC_LEN_ERR (0x830c) +#define RNPGBE_GATHER_PKTS_IN_MAC_SLEN_ERR (0x8310) +#define RNPGBE_GATHER_PKTS_IN_MAC_GLEN_ERR (0x8314) +#define RNPGBE_GATHER_PKTS_IN_MAC_FCS_ERR (0x8318) +#define RNPGBE_GATHER_PKTS_IN_SMALL_64 (0x831c) +#define RNPGBE_GATHER_PKTS_IN_LARGE_64 (0x8320) +/* pip parse */ +#define RNPGBE_PARSE_PKTS_IN (0x8290) +#define RNPGBE_PARSE_PKTS_OUT (0x8294) +#define RNPGBE_PARSE_PKTS_ARP_REQUEST (0x8250) +#define RNPGBE_PARSE_PKTS_ARP_RESPONS (0x8254) +#define RNPGBE_PARSE_PKTS_ICMP (0x8258) +#define RNPGBE_PARSE_PKTS_UDP (0x825c) +#define RNPGBE_PARSE_PKTS_TCP (0x8260) +#define RNPGBE_PARSE_PKTS_ARP_CUT (0x8264) +#define RNPGBE_PARSE_PKTS_ND_CUT (0x8268) +#define RNPGBE_PARSE_PKTS_SCTP (0x826c) +#define RNPGBE_PARSE_PKTS_TCP_SYN (0x8270) +#define RNPGBE_PARSE_PKTS_FRAGMENT (0x827c) +#define RNPGBE_PARSE_PKTS_1_VLAN (0x8280) +#define RNPGBE_PARSE_PKTS_2_VLANS (0x8284) +#define RNPGBE_PARSE_PKTS_IPV4 (0x8288) 
+#define RNPGBE_PARSE_PKTS_IPV6 (0x828c) +#define RNPGBE_PARSE_PKTS_IP_HDR_ERR (0x8298) +#define RNPGBE_PARSE_PKTS_IP_PKT_ERR (0x829c) +#define RNPGBE_PARSE_PKTS_L3_HDR_CHK_ERR (0x82a0) +#define RNPGBE_PARSE_PKTS_L4_HDR_CHK_ERR (0x82a4) +#define RNPGBE_PARSE_PKTS_SCTP_HDR_CHK_ERR (0x82a8) +#define RNPGBE_PARSE_PKTS_VLAN_ERR (0x82ac) +#define RNPGBE_PARSE_PKTS_RDMA (0x82b0) +#define RNPGBE_PARSE_PKTS_ARP_AUTO_RESP (0x82b4) +#define RNPGBE_PARSE_PKTS_ICMPV6 (0x82b8) +#define RNPGBE_PARSE_PKTS_IPV6_EXTEND (0x82bc) +#define RNPGBE_PARSE_PKTS_8023 (0x82c0) +#define RNPGBE_PARSE_PKTS_EXCEPT_SHORT (0x82c4) +#define RNPGBE_PARSE_PKTS_PTP (0x82c8) +#define RNPGBE_PARSE_PKTS_NS_REQ (0x8274) +#define RNPGBE_PARSE_PKTS_NS_NA_AUTO_RES (0x8278) /* ================================================================== */ #define ETH_ERR_SCTP BIT(4) #define ETH_ERR_L4 BIT(3) @@ -575,13 +667,13 @@ #define RNP_COMM_REG0 0x30000 #define RNP_TOP_NIC_VERSION (RNP_COMM_REG0 + 0x0000) -#define RNP500_PHY_RELEASE (0x30000) -#define RNP500_TP_SFP (0x30200) -#define RNP500_TOP_NIC_VERSION (0x8000 + 0x0000) -#define RNP500_FPGA_VERSION (0x8020) -#define RNP500_FPGA_TIME (0x8024) -#define RNP500_LEGANCY_TIME (0xd000) -#define RNP500_LEGANCY_ENABLE (0xd004) +#define RNPGBE_PHY_RELEASE (0x30000) +#define RNPGBE_TP_SFP (0x30200) +#define RNPGBE_TOP_NIC_VERSION (0x8000 + 0x0000) +#define RNPGBE_FPGA_VERSION (0x8020) +#define RNPGBE_FPGA_TIME (0x8024) +#define RNPGBE_LEGANCY_TIME (0xd000) +#define RNPGBE_LEGANCY_ENABLE (0xd004) #define RNP_TOP_NIC_CONFIG (RNP_COMM_REG0 + 0x0004) #define RNP_TOP_NIC_STAT (RNP_COMM_REG0 + 0x0008) #define RNP_TOP_NIC_DUMMY (RNP_COMM_REG0 + 0x000c) @@ -627,63 +719,10 @@ /* ================================================================== */ /* ==================== RNP-MAC Global Registers ==================== */ -//=== MAC Registers== -#define RNP10_MAC_BASE (0x60000) -#define RNP_XLMAC (0x60000) - -#define RNP10_MAC_TX_CFG (0x0000) -#define RNP10_MAC_RX_CFG (0x0004) 
-#define RNP_RX_ALL BIT(31) -#define RNP_RX_ALL_MUL BIT(4) -#define RNP10_MAC_PKT_FLT (0x0008) -#define RNP10_MAC_LPI_CTRL (0x00d0) - -#define RNP10_MAC_Q0_TX_FLOW_CTRL(i) (0x0070 + 0x04 * (i)) -#define RNP10_MAC_RX_FLOW_CTRL (0x0090) - -#define RNP10_TX_FLOW_ENABLE_MASK (0x2) -#define RNP10_RX_FLOW_ENABLE_MASK (0x1) - -#define RNP10_MAC_TX_VLAN_TAG (0x0050) -#define RNP10_MAC_TX_VLAN_MODE (0x0060) -#define RNP10_MAC_INNER_VLAN_INCL (0x0064) - -#define RNP10_MAC_UNICAST_LOW(i) (0x304 + (i) * 0x08) -#define RNP10_MAC_UNICAST_HIGH(i) (0x300 + (i) * 0x08) - -#define RNP500_MAC_BASE (0x20000) - -#define RNP_MODE_NO_SA_INSER (0x0) -#define RNP_SARC_OFFSET (28) -#define RNP_TWOKPE_MASK BIT(27) -#define RNP_SFTERR_MASK BIT(26) -#define RNP_CST_MASK BIT(25) -#define RNP_TC_MASK BIT(24) -#define RNP_WD_MASK BIT(23) -#define RNP_JD_MASK BIT(22) -#define RNP_BE_MASK BIT(21) -#define RNP_JE_MASK BIT(20) -#define RNP_IFG_96 (0x00) -#define RNP_IFG_OFFSET (17) -#define RNP_DCRS_MASK BIT(16) -#define RNP_PS_MASK BIT(15) -#define RNP_FES_MASK BIT(14) -#define RNP_DO_MASK BIT(13) -#define RNP_LM_MASK BIT(12) -#define RNP_DM_MASK BIT(11) -#define RNP_IPC_MASK BIT(10) -#define RNP_DR_MASK BIT(9) -#define RNP_LUD_MASK BIT(8) -#define RNP_ACS_MASK BIT(7) -#define RNP_BL_MODE (0x00) -#define RNP_BL_OFFSET (5) -#define RNP_DC_MASK BIT(4) -#define RNP_TE_MASK BIT(3) -#define RNP_RE_MASK BIT(2) -#define RNP_PRELEN_MODE (0) - -#define RNP500_MAC_UNICAST_LOW(i) (0x44 + (i) * 0x08) -#define RNP500_MAC_UNICAST_HIGH(i) (0x40 + (i) * 0x08) +#define RNPGBE_MAC_BASE (0x20000) + +#define RNPGBE_MAC_UNICAST_LOW(i) (0x44 + (i) * 0x08) +#define RNPGBE_MAC_UNICAST_HIGH(i) (0x40 + (i) * 0x08) #define GMAC_CONTROL 0x00000000 /* Configuration */ #define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ @@ -782,6 +821,8 @@ enum power_event { #define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */ #define GMAC_MANAGEMENT_RX_UNDERSIZE (0x01a4) +#define GMAC_MANAGEMENT_TX_PAUSE (0x170) +#define 
GMAC_MANAGEMENT_RX_PAUSE (0x1D0) #define RNP_MAC_TX_CFG (RNP_XLMAC + 0x0000) #define RNP_MAC_RX_CFG (RNP_XLMAC + 0x0004) #define RNP_MAC_PKT_FLT (RNP_XLMAC + 0x0008) @@ -805,12 +846,6 @@ enum power_event { #define RNP_MAC_SYS_TIME_NANOSEC_UPDATE (RNP_XLMAC + 0x0d14) #define RNP_MAC_TS_ADDEND (RNP_XLMAC + 0x0d18) #define RNP_MAC_TS_STATS (RNP_XLMAC + 0x0d20) -#define RNP_MAC_INTERRUPT_ENABLE (RNP_XLMAC + 0x00b4) - -#define RNP_MAC_STATS_BROADCAST_LOW (RNP_XLMAC + 0x0918) -#define RNP_MAC_STATS_BROADCAST_HIGH (RNP_XLMAC + 0x091c) -#define RNP_MAC_STATS_MULTICAST_LOW (RNP_XLMAC + 0x0920) -#define RNP_MAC_STATS_MULTICAST_HIGH (RNP_XLMAC + 0x0924) #define RNP_TX_FLOW_ENABLE_MASK (0x2) #define RNP_RX_FLOW_ENABLE_MASK (0x1) diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c index e4c97df17829..314b21c9274e 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c @@ -63,6 +63,18 @@ static int rsp_hal_sfc_flash_erase_sector_internal(u8 __iomem *hw_addr, return HAL_OK; } +int rsp_hal_sfc_write_protect(struct rnpgbe_hw *hw, u32 value) +{ + rsp_hal_sfc_flash_write_enable(hw->hw_addr); + + iowrite32(CMD_CYCLE(8), (hw->hw_addr + 0x10)); + iowrite32(WR_DATA_CYCLE(8), (hw->hw_addr + 0x14)); + iowrite32((value << 24), (hw->hw_addr + 0x04)); + rsp_hal_sfc_command(hw->hw_addr, CMD_WRITE_STATUS); + + return 0; +} + int rsp_hal_sfc_flash_erase(struct rnpgbe_hw *hw, u32 size) { u32 addr = SFC_MEM_BASE; diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h index a9dbc324bced..db09deb3caea 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h @@ -15,11 +15,12 @@ #define CMD_WRITE_DISABLE 0x04000000 #define CMD_READ_STATUS 0x05000000 +#define CMD_WRITE_STATUS 0x01000000 #define CMD_WRITE_ENABLE 0x06000000 #define CMD_SECTOR_ERASE 0x20000000 #define 
CMD_BLOCK_ERASE_64K 0xd8000000 -#define SFCADDR(a) ((a) << 8) +#define SFCADDR(a) ((a) << 8) #define CMD_CYCLE(c) (((c) & 0xff) << 0) #define RD_DATA_CYCLE(c) (((c) & 0xff) << 8) #define WR_DATA_CYCLE(c) (((c) & 0xff) << 0) diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c index 4de5bbe4710b..c87704f75ffb 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c @@ -227,6 +227,7 @@ static bool rnpgbe_vfs_are_assigned(struct rnpgbe_adapter *adapter) int rnpgbe_disable_sriov(struct rnpgbe_adapter *adapter) { + struct net_device *netdev = adapter->netdev; struct rnpgbe_hw *hw = &adapter->hw; int rss; int time = 0; @@ -234,14 +235,32 @@ int rnpgbe_disable_sriov(struct rnpgbe_adapter *adapter) if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) return 0; + if (pci_channel_offline(adapter->pdev) == false) { + if (!hw->ncsi_en) + hw->ops.set_mac_rx(hw, false); + hw->ops.set_sriov_status(hw, false); + + if (netif_carrier_ok(netdev)) + netif_carrier_off(netdev); + } + +#ifdef CONFIG_PCI_IOV + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (rnpgbe_vfs_are_assigned(adapter)) + return -EPERM; + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + adapter->num_vfs = 0; adapter->flags &= ~RNP_FLAG_SRIOV_ENABLED; adapter->flags &= ~RNP_FLAG_SRIOV_INIT_DONE; adapter->flags &= ~RNP_FLAG_VF_INIT_DONE; adapter->vlan_count = 0; msleep(100); - hw->ops.set_mac_rx(hw, false); - hw->ops.set_sriov_status(hw, false); /* set num VFs to 0 to prevent access to vfinfo */ while (test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) { @@ -267,20 +286,6 @@ int rnpgbe_disable_sriov(struct rnpgbe_adapter *adapter) kfree(adapter->mv_list); adapter->mv_list = NULL; -#if IS_ENABLED(CONFIG_PCI_IOV) - /* If our VFs are assigned we cannot 
shut down SR-IOV - * without causing issues, so just leave the hardware - * available but disabled - */ - if (rnpgbe_vfs_are_assigned(adapter)) { - e_dev_warn("Unloading driver while VFs are assigned - VFs will not be"); - e_dev_warn(" deallocated\n"); - return -EPERM; - } - /* disable iov and allow time for transactions to clear */ - pci_disable_sriov(adapter->pdev); -#endif /* CONFIG_PCI_IOV */ - /* set default pool back to 0 */ /* Disable VMDq flag so device will be set in VM mode */ if (adapter->ring_feature[RING_F_VMDQ].limit == 1) @@ -648,6 +653,8 @@ static int rnpgbe_vf_reset_msg(struct rnpgbe_adapter *adapter, u32 vf) u32 msgbuf[RNP_VF_PERMADDR_MSG_LEN]; u8 *addr = (u8 *)(&msgbuf[1]); + /* force close rx start for this vf */ + writel(0, hw->hw_addr + 0x1010 + 0x100 * vf); /* reset the filters for the device */ rnpgbe_vf_reset_event(adapter, vf); @@ -655,9 +662,6 @@ static int rnpgbe_vf_reset_msg(struct rnpgbe_adapter *adapter, u32 vf) if (!is_zero_ether_addr(vf_mac)) rnpgbe_set_vf_mac(adapter, vf, vf_mac); - /* enable VF mailbox for further messages */ - adapter->vfinfo[vf].clear_to_send = true; - /* Enable counting of spoofed packets in the SSVPC register */ /* reply to reset with ack and vf mac address */ @@ -687,9 +691,9 @@ static int rnpgbe_vf_reset_msg(struct rnpgbe_adapter *adapter, u32 vf) else msgbuf[RNP_VF_MC_TYPE_WORD] |= (0x00 << 8); /* mc_type */ - msgbuf[RNP_VF_MC_TYPE_WORD] |= rd32(hw, RNP_ETH_DMAC_MCSTCTRL) & 0x03; + msgbuf[RNP_VF_MC_TYPE_WORD] |= hw_rd32(hw, RNP_ETH_DMAC_MCSTCTRL) & 0x03; - msgbuf[RNP_VF_DMA_VERSION_WORD] = rd32(hw, RNP_DMA_VERSION); + msgbuf[RNP_VF_DMA_VERSION_WORD] = hw_rd32(hw, RNP_DMA_VERSION); msgbuf[RNP_VF_VLAN_WORD] = adapter->vfinfo[vf].pf_vlan; /* fixme tx fetch to be added here */ @@ -709,6 +713,7 @@ static int rnpgbe_vf_reset_msg(struct rnpgbe_adapter *adapter, u32 vf) } msgbuf[RNP_VF_AXI_MHZ] = hw->usecstocount; + msgbuf[RNP_VF_FEATURE] = 0; if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) 
msgbuf[RNP_VF_FEATURE] |= PF_FEATRURE_VLAN_FILTER; if (hw->ncsi_en) @@ -717,6 +722,9 @@ static int rnpgbe_vf_reset_msg(struct rnpgbe_adapter *adapter, u32 vf) /* now vf maybe has no irq handler if it is the first reset*/ rnpgbe_write_mbx(hw, msgbuf, RNP_VF_PERMADDR_MSG_LEN, vf); + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + return 0; } @@ -856,7 +864,7 @@ static int rnpgbe_get_vf_reg(struct rnpgbe_adapter *adapter, u32 *msgbuf, { u32 reg = msgbuf[1]; - msgbuf[1] = rd32(&adapter->hw, reg); + msgbuf[1] = hw_rd32(&adapter->hw, reg); return 0; } @@ -923,7 +931,7 @@ static int rnpgbe_vf_get_stats_clr(struct rnpgbe_adapter *adapter, u32 *msgbuf, struct rnpgbe_hw *hw = &adapter->hw; struct rnpgbe_dma_info *dma = &hw->dma; - if (dma_rd32(dma, RNP500_STATISTIC_CRL(vf))) + if (dma_rd32(dma, RNPGBE_STATISTIC_CRL(vf))) msgbuf[1] = 1; else msgbuf[1] = 0; @@ -938,9 +946,9 @@ static int rnpgbe_vf_set_stats_clr(struct rnpgbe_adapter *adapter, u32 *msgbuf, struct rnpgbe_dma_info *dma = &hw->dma; if (msgbuf[1]) - dma_wr32(dma, RNP500_STATISTIC_CRL(vf), 1); + dma_wr32(dma, RNPGBE_STATISTIC_CRL(vf), 1); else - dma_wr32(dma, RNP500_STATISTIC_CRL(vf), 0); + dma_wr32(dma, RNPGBE_STATISTIC_CRL(vf), 0); return 0; } @@ -954,15 +962,7 @@ static int rnpgbe_get_vf_queues(struct rnpgbe_adapter *adapter, u32 *msgbuf, msgbuf[RNP_VF_RX_QUEUES] = hw->sriov_ring_limit; msgbuf[RNP_VF_TRANS_VLAN] = adapter->vfinfo[vf].pf_vlan; msgbuf[RNP_VF_DEF_QUEUE] = 0; - if (hw->hw_type == rnpgbe_hw_n400) { - /* n400, we use */ - /* vf0 use ring4 */ - /* vf1 use ring8 */ - msgbuf[RNP_VF_QUEUE_START] = vf * 4 + 4; - - } else { - msgbuf[RNP_VF_QUEUE_START] = vf * hw->sriov_ring_limit; - } + msgbuf[RNP_VF_QUEUE_START] = vf * hw->sriov_ring_limit; msgbuf[RNP_VF_QUEUE_DEPTH] = (adapter->tx_ring_item_count << 16) | adapter->rx_ring_item_count; @@ -1053,6 +1053,7 @@ static int rnpgbe_rcv_msg_from_vf(struct rnpgbe_adapter *adapter, u32 vf) case RNP_PF_REMOVE: vf_dbg("vf %d 
removed\n", vf); adapter->vfinfo[vf].clear_to_send = false; + adapter->vfinfo[vf].get_mtu_done = false; adapter->vfinfo[vf].vf_vlan = 0; retval = 1; break; @@ -1089,6 +1090,8 @@ static int rnpgbe_rcv_msg_from_vf(struct rnpgbe_adapter *adapter, u32 vf) if ((msgbuf[0] & RNP_MAIL_CMD_MASK) != RNP_PF_REMOVE) rnpgbe_write_mbx(hw, msgbuf, mbx_size, vf); + if ((msgbuf[0] & RNP_MAIL_CMD_MASK) == RNP_VF_GET_MTU) + adapter->vfinfo[vf].get_mtu_done = true; return retval; } @@ -1272,7 +1275,7 @@ int rnpgbe_setup_ring_maxrate(struct rnpgbe_adapter *adapter, int ring, { struct rnpgbe_hw *hw = &adapter->hw; struct rnpgbe_dma_info *dma = &hw->dma; - int samples_1sec = adapter->hw.usecstocount * 1000000; + int samples_1sec = adapter->hw.usecstocount * 100000; dma_ring_wr32(dma, RING_OFFSET(ring) + RNP_DMA_REG_TX_FLOW_CTRL_TM, samples_1sec); @@ -1341,6 +1344,16 @@ int rnpgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, int err = 0; struct rnpgbe_adapter *adapter = netdev_priv(netdev); + if (!(adapter->flags & RNP_FLAG_VF_INIT_DONE)) { + dev_err(pci_dev_to_dev(adapter->pdev), + "set vf vlan failed, vf %d has not been initialized yet, please retry\n", + vf); + return -EINVAL; + } + + if (!adapter->vfinfo[vf].get_mtu_done) + return -EINVAL; + /* VLAN IDs accepted range 0-4094 */ if (vf < 0 || vf >= adapter->num_vfs || vlan > VLAN_VID_MASK - 1 || qos > 7) @@ -1395,6 +1408,13 @@ int rnpgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) if (vf < 0 || vf >= adapter->num_vfs) return -EINVAL; + if (!(adapter->flags & RNP_FLAG_VF_INIT_DONE)) { + dev_err(pci_dev_to_dev(adapter->pdev), + "set vf vlan failed, vf %d has not been initialized yet, please retry\n", + vf); + return -EINVAL; + } + /* maybe we not support this in hw */ adapter->vfinfo[vf].spoofchk_enabled = setting; @@ -1430,6 +1450,16 @@ int rnpgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) goto out; } + if (!(adapter->flags & RNP_FLAG_VF_INIT_DONE)) { + 
dev_err(pci_dev_to_dev(adapter->pdev), + "set vf vlan failed, vf %d has not been initialized yet, please retry\n", + vf); + return -EINVAL; + } + + if (!adapter->vfinfo[vf].get_mtu_done) + return -EINVAL; + switch (state) { case IFLA_VF_LINK_STATE_ENABLE: dev_info(&adapter->pdev->dev, "NDO set VF %d link state %d\n", @@ -1475,6 +1505,16 @@ int rnpgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, if (vf >= hw->max_vfs - 1) return -EINVAL; + if (!(adapter->flags & RNP_FLAG_VF_INIT_DONE)) { + dev_err(pci_dev_to_dev(adapter->pdev), + "set vf vlan failed, vf %d has not has not been initialized yet, please retry\n", + vf); + return -EINVAL; + } + + if (!adapter->vfinfo[vf].get_mtu_done) + return -EINVAL; + switch (adapter->link_speed) { case RNP_LINK_SPEED_40GB_FULL: link_speed = 40000; @@ -1492,15 +1532,23 @@ int rnpgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, link_speed = 100; break; } - /* rate limit cannot be less than 1Mbs or greater than link speed */ - if (max_tx_rate && (max_tx_rate <= 1 || max_tx_rate > link_speed)) + + /* rate limit cannot be greater than link speed */ + if (max_tx_rate && max_tx_rate > link_speed) return -EINVAL; adapter->vfinfo[vf].tx_rate = max_tx_rate; ring_max_rate = max_tx_rate / hw->sriov_ring_limit; - real_rate = (ring_max_rate * 1024 * 128); + if (max_tx_rate <= 10) + real_rate = (ring_max_rate * 1000 * 85) >> 3; + else if (max_tx_rate <= 50) + real_rate = (ring_max_rate * 1000 * 90) >> 3; + else if (max_tx_rate <= 100) + real_rate = (ring_max_rate * 1000 * 94) >> 3; + else + real_rate = (ring_max_rate * 1000 * 99) >> 3; vf_ring = rnpgbe_get_vf_ringnum(hw, vf, 0); rnpgbe_setup_ring_maxrate(adapter, vf_ring, real_rate); @@ -1514,6 +1562,17 @@ int rnpgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs) return -EINVAL; + + if (!(adapter->flags & RNP_FLAG_VF_INIT_DONE)) { + dev_err(pci_dev_to_dev(adapter->pdev), + "set vf vlan failed, vf %d has not been 
initialized yet, please retry\n", + vf); + return -EINVAL; + } + + if (!adapter->vfinfo[vf].get_mtu_done) + return -EINVAL; + adapter->vfinfo[vf].pf_set_mac = true; dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"); diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c index aab4033fb998..bac48195e87a 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c @@ -8,12 +8,14 @@ #include #include #include +#include #include "rnpgbe.h" #include "rnpgbe_common.h" #include "rnpgbe_type.h" #include "rnpgbe_mbx.h" #include "rnpgbe_mbx_fw.h" +#include "version.h" #ifdef RNPGBE_HWMON #include @@ -76,7 +78,7 @@ static ssize_t rnpgbe_hwmon_show_location(struct device __always_unused *dev, static ssize_t rnpgbe_hwmon_show_name(struct device __always_unused *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "rnp\n"); + return snprintf(buf, PAGE_SIZE, "rnpgbe\n"); } static ssize_t rnpgbe_hwmon_show_temp(struct device __always_unused *dev, @@ -231,6 +233,68 @@ static void n500_exchange_share_ram(struct rnpgbe_hw *hw, u32 *buf, int flag, in } } +static void n210_clean_share_ram(struct rnpgbe_hw *hw) +{ + struct rnpgbe_mbx_info *mbx = &hw->mbx; + int len = mbx->share_size; + int i; + + for (i = 0; i < len; i = i + 4) + rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i, + 0xffffffff); +} + +static int check_fw_type(struct rnpgbe_hw *hw, const u8 *data, int len) +{ + struct crc32_info *info = (struct crc32_info *)(data + CRC_OFFSET); + u32 crc32 = 0xffffffff; + u32 crc32_goal; + u32 device_id; + int ret = 0; + + /* if too small, maybe from tools, not firmware */ + if (len < 1024) + return ret; + + if (info->magic == CRC32_MAGIC) { + crc32_goal = info->crc32; + info->crc32 = 0; + info->magic = 0; + + crc32 = crc32_le(crc32, data, len); + if (crc32 
!= crc32_goal) + return -1; + info->magic = CRC32_MAGIC; + info->crc32 = crc32_goal; + } + + device_id = *((u16 *)data + 30); + + /* if no device_id no check */ + if (device_id == 0 || device_id == 0xffff) + return 0; + + switch (hw->hw_type) { + case rnpgbe_hw_n500: + if (device_id != 0x8308) + ret = 1; + break; + case rnpgbe_hw_n210: + if (device_id != 0x8208) + ret = 1; + break; + case rnpgbe_hw_n210L: + if (device_id != 0x820a) + ret = 1; + break; + + default: + ret = 1; + } + + return ret; +} + static ssize_t maintain_write(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) @@ -286,11 +350,33 @@ static ssize_t maintain_write(struct file *filp, struct kobject *kobj, int offset; struct rnpgbe_mbx_info *mbx = &hw->mbx; + if (req->cmd == 1) { + if (check_fw_type(hw, (u8 *)(dma_buf + sizeof(*req)), + req->req_data_bytes)) { + err = -EINVAL; + goto err_quit; + } + } + if (req->cmd) { int data_len; int ram_size = mbx->share_size; offset = 0; + + if (req->req_data_bytes > ram_size && req->cmd == 1) + offset += ram_size; + /* if n210 first clean header */ + if (hw->hw_type == rnpgbe_hw_n210 || + hw->hw_type == rnpgbe_hw_n210L) { + n210_clean_share_ram(hw); + err = rnpgbe_maintain_req(hw, req->cmd, + req->arg0, + 0, 0, 0); + if (err != 0) + goto err_quit; + } + while (offset < req->req_data_bytes) { data_len = (req->req_data_bytes - offset) > ram_size ? 
u32 *data = (u32 *)(dma_buf + sizeof(*req));
"driver: %s %s\n",
mii_info_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct net_device *netdev = to_net_device(dev); - struct rnpgbe_adapter *adapter = netdev_priv(netdev); - u32 reg_num = adapter->sysfs_mii_reg; - u32 reg_value = adapter->sysfs_mii_value; - int ret = 0; - u32 value; - - if (adapter->sysfs_mii_control) { - rnpgbe_mdio_write(netdev, 0, 0, reg_num, reg_value); - ret += sprintf(buf + ret, "write reg %x : %x\n", reg_num, - reg_value); - - } else { - rnpgbe_mdio_read(netdev, 0, 0, reg_num, &value); - ret += sprintf(buf + ret, "read reg %x : %x\n", reg_num, value); - } - - return ret; -} - static ssize_t tx_ring_info_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -957,8 +987,8 @@ static ssize_t active_vid_show(struct device *dev, u8 vfnum = hw->max_vfs - 1; if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { - current_vid = rd32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port, - vfnum)); + current_vid = hw_rd32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port, + vfnum)); } for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) { @@ -987,11 +1017,11 @@ static ssize_t active_vid_store(struct device *dev, return -EINVAL; if (vid < 4096 && test_bit(vid, adapter->active_vlans)) { - if (rd32(hw, RNP_DMA_VERSION) >= 0x20201231) { - wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(0, vfnum), vid); + if (hw_rd32(hw, RNP_DMA_VERSION) >= 0x20201231) { + hw_wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(0, vfnum), vid); } else { - wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port, vfnum), - vid); + hw_wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port, vfnum), + vid); } err = 0; } @@ -1060,9 +1090,13 @@ static ssize_t temperature_show(struct device *dev, struct rnpgbe_hw *hw = &adapter->hw; int ret = 0, temp = 0, voltage = 0; - temp = rnpgbe_mbx_get_temp(hw, &voltage); - - ret += sprintf(buf, "temp:%d oC\n", temp); + /* only n500 support temperature */ + if (hw->hw_type != rnpgbe_hw_n500) { + ret += sprintf(buf, "chip not support this\n"); + } else { + temp = 
rnpgbe_mbx_get_temp(hw, &voltage); + ret += sprintf(buf, "temp:%d oC\n", temp); + } return ret; } @@ -1103,10 +1137,6 @@ static DEVICE_ATTR_RO(temperature); static DEVICE_ATTR_RW(active_vid); static DEVICE_ATTR_RW(queue_mapping); static DEVICE_ATTR_RW(tx_ring_info); -static DEVICE_ATTR_RO(mii_info); -static DEVICE_ATTR_WO(mii_reg_info); -static DEVICE_ATTR_WO(mii_control_info); -static DEVICE_ATTR_WO(mii_value_info); static DEVICE_ATTR_RW(rx_ring_info); static DEVICE_ATTR_RW(tx_desc_info); static DEVICE_ATTR_RW(rx_desc_info); @@ -1116,6 +1146,7 @@ static DEVICE_ATTR_RW(tcp_sync_info); static DEVICE_ATTR_RO(rx_skip_info); static DEVICE_ATTR_RW(tx_stags_info); static DEVICE_ATTR_RW(gephy_test_info); +static DEVICE_ATTR_RO(version_info); static struct attribute *vendor_dev_attrs[] = { &dev_attr_pci.attr, @@ -1128,19 +1159,16 @@ static struct attribute *vendor_dev_attrs[] = { &dev_attr_rx_drop_info.attr, &dev_attr_outer_vlan_info.attr, &dev_attr_rx_skip_info.attr, - &dev_attr_mii_info.attr, - &dev_attr_mii_control_info.attr, - &dev_attr_mii_reg_info.attr, - &dev_attr_mii_value_info.attr, &dev_attr_gephy_test_info.attr, - &dev_attr_tx_stags_info.attr, NULL, }; static struct attribute *dev_attrs[] = { + &dev_attr_tx_stags_info.attr, &dev_attr_root_slot_info.attr, &dev_attr_active_vid.attr, &dev_attr_queue_mapping.attr, + &dev_attr_version_info.attr, &dev_attr_port_idx.attr, NULL, }; @@ -1169,6 +1197,15 @@ static const struct attribute_group *attr_grps[] = { static void rnpgbe_sysfs_del_adapter(struct rnpgbe_adapter __maybe_unused *adapter) { +#ifdef RNPGBE_HWMON + if (!adapter) + return; + + if (adapter->hwmon_dev) { + hwmon_device_unregister(adapter->hwmon_dev); + adapter->hwmon_dev = NULL; + } +#endif /* RNPGBE_HWMON */ } /* called from rnpgbe_main.c */ @@ -1218,7 +1255,7 @@ int rnpgbe_sysfs_init(struct rnpgbe_adapter *adapter) adapter->rnpgbe_hwmon_buff = rnpgbe_hwmon; - for (i = 0; i < RNP_MAX_SENSORS; i++) { + for (i = 0; i < RNPGBE_MAX_SENSORS; i++) { /* Only 
create hwmon sysfs entries for sensors that have * meaningful data for. */ @@ -1244,13 +1281,13 @@ int rnpgbe_sysfs_init(struct rnpgbe_adapter *adapter) rnpgbe_hwmon->group.attrs = rnpgbe_hwmon->attrs; hwmon_dev = devm_hwmon_device_register_with_groups( - &adapter->pdev->dev, "rnp", rnpgbe_hwmon, rnpgbe_hwmon->groups); + &adapter->pdev->dev, "rnpgbe", rnpgbe_hwmon, rnpgbe_hwmon->groups); if (IS_ERR(hwmon_dev)) { rc = PTR_ERR(hwmon_dev); goto exit; } - + adapter->hwmon_dev = hwmon_dev; no_thermal: #endif /* RNPGBE_HWMON */ goto exit; diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_type.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_type.h index f8110463b422..409a810d88f4 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_type.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_type.h @@ -52,6 +52,7 @@ #define PCI_DEVICE_ID_N500_DUAL_PORT 0x8318 #define PCI_DEVICE_ID_N500_VF 0x8309 #define PCI_DEVICE_ID_N210 0x8208 +#define PCI_DEVICE_ID_N210L 0x820a /* Wake Up Control */ #define RNP_WUC_PME_EN 0x00000002 /* PME Enable */ #define RNP_WUC_PME_STATUS 0x00000004 /* PME Status */ @@ -95,16 +96,16 @@ #define ADVERTISE_2500_HALF 0x0040 /* NOT used, just FYI */ #define ADVERTISE_2500_FULL 0x0080 -#define RNP_MAX_SENSORS 1 +#define RNPGBE_MAX_SENSORS 1 struct rnpgbe_thermal_diode_data { - u8 location; - u8 temp; - u8 caution_thresh; - u8 max_op_thresh; + unsigned int location; + unsigned int temp; + unsigned int caution_thresh; + unsigned int max_op_thresh; }; struct rnpgbe_thermal_sensor_data { - struct rnpgbe_thermal_diode_data sensor[RNP_MAX_SENSORS]; + struct rnpgbe_thermal_diode_data sensor[RNPGBE_MAX_SENSORS]; }; /* Wake Up Status */ @@ -528,13 +529,9 @@ enum rnpgbe_rss_type { }; enum rnpgbe_hw_type { - rnpgbe_hw_uv440 = 0, - rnpgbe_hw_uv3p, - rnpgbe_hw_n10, - rnpgbe_hw_n20, - rnpgbe_hw_n400, - rnpgbe_hw_n500, + rnpgbe_hw_n500 = 0, rnpgbe_hw_n210, + rnpgbe_hw_n210L, }; enum rnpgbe_eth_type { rnpgbe_eth_n10 = 0, rnpgbe_eth_n500 }; @@ -682,6 +679,8 @@ struct 
rnpgbe_hw_stats { u64 dma_rx_drop_cnt_5; u64 dma_rx_drop_cnt_6; u64 dma_rx_drop_cnt_7; + u64 tx_pause; + u64 rx_pause; }; /* forward declaration */ @@ -741,8 +740,6 @@ struct rnpgbe_eth_operations { void (*set_vlan_filter)(struct rnpgbe_eth_info *eth, bool status); void (*set_outer_vlan_type)(struct rnpgbe_eth_info *eth, int type); void (*set_double_vlan)(struct rnpgbe_eth_info *eth, bool on); - void (*set_vxlan_port)(struct rnpgbe_eth_info *eth, u32 port); - void (*set_vxlan_mode)(struct rnpgbe_eth_info *eth, bool inner); s32 (*set_fc_mode)(struct rnpgbe_eth_info *eth); void (*set_rx)(struct rnpgbe_eth_info *eth, bool status); void (*set_fcs)(struct rnpgbe_eth_info *eth, bool status); @@ -753,7 +750,7 @@ struct rnpgbe_eth_operations { enum { rnpgbe_driver_insmod, rnpgbe_driver_suspuse, - rnpgbe_driver_force_control_mac, + rnpgbe_driver_force_control_phy, }; struct rnpgbe_hw_operations { @@ -782,8 +779,6 @@ struct rnpgbe_hw_operations { void (*set_txvlan_mode)(struct rnpgbe_hw *hw, bool vlan); void (*set_tx_maxrate)(struct rnpgbe_hw *hw, bool flag); void (*set_fcs_mode)(struct rnpgbe_hw *hw, bool status); - void (*set_vxlan_port)(struct rnpgbe_hw *hw, u32 port); - void (*set_vxlan_mode)(struct rnpgbe_hw *hw, bool inner); void (*set_mac_speed)(struct rnpgbe_hw *hw, bool link, u32 speed, bool duplex); void (*set_mac_rx)(struct rnpgbe_hw *hw, bool status); @@ -810,8 +805,6 @@ struct rnpgbe_hw_operations { s32 (*setup_link)(struct rnpgbe_hw *hw, rnpgbe_link_speed adv, u32 autoneg, u32 speed, u32 duplex); void (*clean_link)(struct rnpgbe_hw *hw); - s32 (*get_link_capabilities)(struct rnpgbe_hw *hw, - rnpgbe_link_speed *speed, bool *autoneg); s32 (*init_rx_addrs)(struct rnpgbe_hw *hw); void (*set_layer2_remapping)(struct rnpgbe_hw *hw, union rnpgbe_atr_input *input, u16 pri_id, @@ -851,6 +844,7 @@ struct rnpgbe_hw_operations { int (*get_ncsi_vlan)(struct rnpgbe_hw *hw, u16 *vlan, int idx); void (*set_lldp)(struct rnpgbe_hw *hw, bool enable); void (*get_lldp)(struct 
rnpgbe_hw *hw); + int (*dump_debug_regs)(struct rnpgbe_hw *hw, char *var); }; struct rnpgbe_mac_operations { @@ -1103,6 +1097,17 @@ struct lldp_status { int inteval; }; +struct rnpgbe_debug_reg { + char *name; + u32 offset; +}; + +struct rnpgbe_debug_reg_bits { + u32 flags; + u32 offset; + char *name[32]; +}; + struct rnpgbe_hw { void *back; u8 __iomem *hw_addr; @@ -1111,6 +1116,7 @@ struct rnpgbe_hw { u8 pfvfnum; u8 pfvfnum_system; struct pci_dev *pdev; + int msix_vector_base; u16 device_id; u16 vendor_id; u16 subsystem_device_id; @@ -1122,9 +1128,10 @@ struct rnpgbe_hw { int sfc_boot; int pxe_en; int ncsi_en; + int trim_valid; u8 is_backplane : 1; u8 is_sgmii : 1; - u8 force_10g_1g_speed_ablity : 1; + u8 force_10g_1g_speed_ability : 1; u8 force_speed_stat : 2; #define FORCE_SPEED_STAT_DISABLED 0 #define FORCE_SPEED_STAT_1G 1 @@ -1132,6 +1139,7 @@ struct rnpgbe_hw { u32 supported_link; u32 advertised_link; u32 autoneg; + u32 fake_autoneg; u32 tp_mdx; u32 tp_mdix_ctrl; u32 phy_id; @@ -1196,6 +1204,9 @@ struct rnpgbe_hw { bool wol_enabled; unsigned long wol_supported; int fw_version; + int force_en; + int force_cap; + u32 driver_version; u8 sfp_connector; struct vf_vebvlans vf_vas; diff --git a/drivers/net/ethernet/mucse/rnpgbe/version.h b/drivers/net/ethernet/mucse/rnpgbe/version.h index 50365c0b25c5..9c15cfa41999 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/version.h +++ b/drivers/net/ethernet/mucse/rnpgbe/version.h @@ -3,5 +3,5 @@ #ifndef VERSION_H #define VERSION_H -#define GIT_COMMIT " ad1ebc4" +#define GIT_COMMIT " 73a8c38" #endif -- Gitee