From f3b5f3e4d44388ac59bb08df2861ccfb2334ddd4 Mon Sep 17 00:00:00 2001
From: guoyongqiang12
Date: Tue, 4 Nov 2025 16:41:12 +0800
Subject: [PATCH] urma: kernel baseline

Signed-off-by: Yongqiang Guo
---
 arch/arm64/configs/openeuler_defconfig | 1 +
 drivers/ub/Kconfig | 32 +-
 drivers/ub/Makefile | 1 +
 drivers/ub/urma/Makefile | 9 +
 drivers/ub/urma/ubagg/Makefile | 17 +
 drivers/ub/urma/ubagg/ubagg_bitmap.c | 145 +
 drivers/ub/urma/ubagg/ubagg_bitmap.h | 51 +
 drivers/ub/urma/ubagg/ubagg_hash_table.c | 239 ++
 drivers/ub/urma/ubagg/ubagg_hash_table.h | 79 +
 drivers/ub/urma/ubagg/ubagg_ioctl.c | 1826 +++++
 drivers/ub/urma/ubagg/ubagg_ioctl.h | 184 +
 drivers/ub/urma/ubagg/ubagg_jetty.c | 86 +
 drivers/ub/urma/ubagg/ubagg_jetty.h | 38 +
 drivers/ub/urma/ubagg/ubagg_log.c | 24 +
 drivers/ub/urma/ubagg/ubagg_log.h | 103 +
 drivers/ub/urma/ubagg/ubagg_main.c | 166 +
 drivers/ub/urma/ubagg/ubagg_seg.c | 138 +
 drivers/ub/urma/ubagg/ubagg_seg.h | 47 +
 drivers/ub/urma/ubagg/ubagg_topo_info.c | 80 +
 drivers/ub/urma/ubagg/ubagg_topo_info.h | 61 +
 drivers/ub/urma/ubagg/ubagg_types.h | 146 +
 drivers/ub/urma/ubcore/Makefile | 45 +
 drivers/ub/urma/ubcore/net/ubcore_cm.c | 115 +
 drivers/ub/urma/ubcore/net/ubcore_cm.h | 77 +
 drivers/ub/urma/ubcore/net/ubcore_net.c | 184 +
 drivers/ub/urma/ubcore/net/ubcore_net.h | 120 +
 drivers/ub/urma/ubcore/net/ubcore_session.c | 240 ++
 drivers/ub/urma/ubcore/net/ubcore_session.h | 96 +
 drivers/ub/urma/ubcore/net/ubcore_sock.c | 676 ++++
 drivers/ub/urma/ubcore/net/ubcore_sock.h | 35 +
 drivers/ub/urma/ubcore/ubcm/ub_cm.c | 500 +++
 drivers/ub/urma/ubcore/ubcm/ub_cm.h | 57 +
 drivers/ub/urma/ubcore/ubcm/ub_mad.c | 1277 ++++++
 drivers/ub/urma/ubcore/ubcm/ub_mad.h | 93 +
 drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h | 270 ++
 drivers/ub/urma/ubcore/ubcm/ubcm_genl.c | 900 +++++
 drivers/ub/urma/ubcore/ubcm/ubcm_genl.h | 139 +
 drivers/ub/urma/ubcore/ubcm/ubcm_log.c | 24 +
 drivers/ub/urma/ubcore/ubcm/ubcm_log.h | 103 +
 drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c | 1210 ++++++
 drivers/ub/urma/ubcore/ubcore_cdev_file.c | 1274 ++++++
 drivers/ub/urma/ubcore/ubcore_cdev_file.h | 47 +
 drivers/ub/urma/ubcore/ubcore_cgroup.c | 115 +
 drivers/ub/urma/ubcore/ubcore_cmd.h | 166 +
 drivers/ub/urma/ubcore/ubcore_cmd_tlv.c | 319 ++
 drivers/ub/urma/ubcore/ubcore_cmd_tlv.h | 927 +++++
 .../ub/urma/ubcore/ubcore_connect_adapter.c | 707 ++++
 .../ub/urma/ubcore/ubcore_connect_adapter.h | 53 +
 .../ub/urma/ubcore/ubcore_connect_bonding.c | 450 +++
 .../ub/urma/ubcore/ubcore_connect_bonding.h | 42 +
 drivers/ub/urma/ubcore/ubcore_ctp.c | 138 +
 drivers/ub/urma/ubcore/ubcore_ctp.h | 34 +
 drivers/ub/urma/ubcore/ubcore_device.c | 3191 +++++++++++++++
 drivers/ub/urma/ubcore/ubcore_device.h | 65 +
 drivers/ub/urma/ubcore/ubcore_dp.c | 125 +
 drivers/ub/urma/ubcore/ubcore_genl.c | 233 ++
 drivers/ub/urma/ubcore/ubcore_genl.h | 28 +
 drivers/ub/urma/ubcore/ubcore_genl_admin.c | 963 +++++
 drivers/ub/urma/ubcore/ubcore_genl_admin.h | 44 +
 drivers/ub/urma/ubcore/ubcore_genl_define.h | 48 +
 drivers/ub/urma/ubcore/ubcore_hash_table.c | 253 ++
 drivers/ub/urma/ubcore/ubcore_hash_table.h | 67 +
 drivers/ub/urma/ubcore/ubcore_jetty.c | 2682 +++++++++++++
 drivers/ub/urma/ubcore/ubcore_log.c | 24 +
 drivers/ub/urma/ubcore/ubcore_log.h | 107 +
 drivers/ub/urma/ubcore/ubcore_main.c | 1129 ++++++
 drivers/ub/urma/ubcore/ubcore_main.h | 56 +
 drivers/ub/urma/ubcore/ubcore_msg.c | 319 ++
 drivers/ub/urma/ubcore/ubcore_msg.h | 171 +
 drivers/ub/urma/ubcore/ubcore_netdev.c | 709 ++++
 drivers/ub/urma/ubcore/ubcore_netdev.h | 68 +
 drivers/ub/urma/ubcore/ubcore_netlink.c | 974 +++++
 drivers/ub/urma/ubcore/ubcore_netlink.h | 230 ++
 drivers/ub/urma/ubcore/ubcore_priv.h | 215 ++
 drivers/ub/urma/ubcore/ubcore_segment.c | 261 ++
 drivers/ub/urma/ubcore/ubcore_topo_info.c | 484 +++
 drivers/ub/urma/ubcore/ubcore_topo_info.h | 71 +
 drivers/ub/urma/ubcore/ubcore_tp.c | 1693 ++++++++
 drivers/ub/urma/ubcore/ubcore_tp.h | 96 +
 drivers/ub/urma/ubcore/ubcore_tp_table.c | 219 ++
 drivers/ub/urma/ubcore/ubcore_tp_table.h | 75 +
 drivers/ub/urma/ubcore/ubcore_tpg.c | 407 ++
 drivers/ub/urma/ubcore/ubcore_tpg.h | 50 +
 drivers/ub/urma/ubcore/ubcore_umem.c | 343 ++
 drivers/ub/urma/ubcore/ubcore_utp.c | 150 +
 drivers/ub/urma/ubcore/ubcore_utp.h | 33 +
 drivers/ub/urma/ubcore/ubcore_uvs.c | 735 ++++
 drivers/ub/urma/ubcore/ubcore_uvs.h | 67 +
 drivers/ub/urma/ubcore/ubcore_uvs_cmd.c | 320 ++
 drivers/ub/urma/ubcore/ubcore_uvs_cmd.h | 55 +
 drivers/ub/urma/ubcore/ubcore_vtp.c | 2276 +++++++++++
 drivers/ub/urma/ubcore/ubcore_vtp.h | 199 +
 drivers/ub/urma/ubcore/ubcore_workqueue.c | 132 +
 drivers/ub/urma/ubcore/ubcore_workqueue.h | 63 +
 drivers/ub/urma/uburma/Makefile | 15 +
 drivers/ub/urma/uburma/config/uburma.conf | 1 +
 drivers/ub/urma/uburma/uburma_cmd.c | 3435 +++++++++++++++++
 drivers/ub/urma/uburma/uburma_cmd.h | 1086 ++++++
 drivers/ub/urma/uburma/uburma_cmd_tlv.c | 2087 ++++++++++
 drivers/ub/urma/uburma/uburma_cmd_tlv.h | 949 +++++
 drivers/ub/urma/uburma/uburma_dev_ops.c | 282 ++
 drivers/ub/urma/uburma/uburma_event.c | 818 ++++
 drivers/ub/urma/uburma/uburma_event.h | 55 +
 drivers/ub/urma/uburma/uburma_file_ops.h | 34 +
 drivers/ub/urma/uburma/uburma_log.c | 24 +
 drivers/ub/urma/uburma/uburma_log.h | 103 +
 drivers/ub/urma/uburma/uburma_main.c | 420 ++
 drivers/ub/urma/uburma/uburma_mmap.c | 181 +
 drivers/ub/urma/uburma/uburma_mmap.h | 33 +
 drivers/ub/urma/uburma/uburma_types.h | 106 +
 drivers/ub/urma/uburma/uburma_uobj.c | 1332 +++++++
 drivers/ub/urma/uburma/uburma_uobj.h | 306 ++
 include/ub/urma/ubcore_api.h | 169 +
 include/ub/urma/ubcore_jetty.h | 51 +
 include/ub/urma/ubcore_opcode.h | 126 +
 include/ub/urma/ubcore_types.h | 3074 +++++++++++++++
 include/ub/urma/ubcore_uapi.h | 821 ++++
 117 files changed, 48834 insertions(+), 10 deletions(-)

 create mode 100644 drivers/ub/urma/Makefile
 create mode 100644 drivers/ub/urma/ubagg/Makefile
 create mode 100644 drivers/ub/urma/ubagg/ubagg_bitmap.c
 create mode 100644 drivers/ub/urma/ubagg/ubagg_bitmap.h
 create mode 100644 drivers/ub/urma/ubagg/ubagg_hash_table.c
 create mode 100644 drivers/ub/urma/ubagg/ubagg_hash_table.h
 create mode 100644 drivers/ub/urma/ubagg/ubagg_ioctl.c
 create mode 100644 drivers/ub/urma/ubagg/ubagg_ioctl.h
 create mode 100644 drivers/ub/urma/ubagg/ubagg_jetty.c
 create mode 100644 drivers/ub/urma/ubagg/ubagg_jetty.h
 create mode 100644 drivers/ub/urma/ubagg/ubagg_log.c
 create mode 100644 drivers/ub/urma/ubagg/ubagg_log.h
 create mode 100644 drivers/ub/urma/ubagg/ubagg_main.c
 create mode 100644 drivers/ub/urma/ubagg/ubagg_seg.c
 create mode 100644 drivers/ub/urma/ubagg/ubagg_seg.h
 create mode 100644 drivers/ub/urma/ubagg/ubagg_topo_info.c
 create mode 100644 drivers/ub/urma/ubagg/ubagg_topo_info.h
 create mode 100644 drivers/ub/urma/ubagg/ubagg_types.h
 create mode 100644 drivers/ub/urma/ubcore/Makefile
 create mode 100644 drivers/ub/urma/ubcore/net/ubcore_cm.c
 create mode 100644 drivers/ub/urma/ubcore/net/ubcore_cm.h
 create mode 100644 drivers/ub/urma/ubcore/net/ubcore_net.c
 create mode 100644 drivers/ub/urma/ubcore/net/ubcore_net.h
 create mode 100644 drivers/ub/urma/ubcore/net/ubcore_session.c
 create mode 100644 drivers/ub/urma/ubcore/net/ubcore_session.h
 create mode 100644 drivers/ub/urma/ubcore/net/ubcore_sock.c
 create mode 100644 drivers/ub/urma/ubcore/net/ubcore_sock.h
 create mode 100644 drivers/ub/urma/ubcore/ubcm/ub_cm.c
 create mode 100644 drivers/ub/urma/ubcore/ubcm/ub_cm.h
 create mode 100644 drivers/ub/urma/ubcore/ubcm/ub_mad.c
 create mode 100644 drivers/ub/urma/ubcore/ubcm/ub_mad.h
 create mode 100644 drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h
 create mode 100644 drivers/ub/urma/ubcore/ubcm/ubcm_genl.c
 create mode 100644 drivers/ub/urma/ubcore/ubcm/ubcm_genl.h
 create mode 100644 drivers/ub/urma/ubcore/ubcm/ubcm_log.c
 create mode 100644 drivers/ub/urma/ubcore/ubcm/ubcm_log.h
 create mode 100644 drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_cdev_file.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_cdev_file.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_cgroup.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_cmd.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_cmd_tlv.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_cmd_tlv.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_connect_adapter.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_connect_adapter.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_connect_bonding.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_connect_bonding.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_ctp.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_ctp.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_device.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_device.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_dp.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_genl.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_genl.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_genl_admin.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_genl_admin.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_genl_define.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_hash_table.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_hash_table.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_jetty.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_log.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_log.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_main.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_main.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_msg.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_msg.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_netdev.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_netdev.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_netlink.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_netlink.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_priv.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_segment.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_topo_info.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_topo_info.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_tp.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_tp.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_tp_table.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_tp_table.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_tpg.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_tpg.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_umem.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_utp.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_utp.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_uvs.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_uvs.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_uvs_cmd.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_uvs_cmd.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_vtp.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_vtp.h
 create mode 100644 drivers/ub/urma/ubcore/ubcore_workqueue.c
 create mode 100644 drivers/ub/urma/ubcore/ubcore_workqueue.h
 create mode 100644 drivers/ub/urma/uburma/Makefile
 create mode 100644 drivers/ub/urma/uburma/config/uburma.conf
 create mode 100644 drivers/ub/urma/uburma/uburma_cmd.c
 create mode 100644 drivers/ub/urma/uburma/uburma_cmd.h
 create mode 100644 drivers/ub/urma/uburma/uburma_cmd_tlv.c
 create mode 100644 drivers/ub/urma/uburma/uburma_cmd_tlv.h
 create mode 100644 drivers/ub/urma/uburma/uburma_dev_ops.c
 create mode 100644 drivers/ub/urma/uburma/uburma_event.c
 create mode 100644 drivers/ub/urma/uburma/uburma_event.h
 create mode 100644 drivers/ub/urma/uburma/uburma_file_ops.h
 create mode 100644 drivers/ub/urma/uburma/uburma_log.c
 create mode 100644 drivers/ub/urma/uburma/uburma_log.h
 create mode 100644 drivers/ub/urma/uburma/uburma_main.c
 create mode 100644 drivers/ub/urma/uburma/uburma_mmap.c
 create mode 100644 drivers/ub/urma/uburma/uburma_mmap.h
 create mode 100644 drivers/ub/urma/uburma/uburma_types.h
 create mode 100644 drivers/ub/urma/uburma/uburma_uobj.c
 create mode 100644 drivers/ub/urma/uburma/uburma_uobj.h
 create mode 100644 include/ub/urma/ubcore_api.h
 create mode 100644 include/ub/urma/ubcore_jetty.h
 create mode 100644 include/ub/urma/ubcore_opcode.h
 create mode 100644 include/ub/urma/ubcore_types.h
 create mode 100644 include/ub/urma/ubcore_uapi.h

diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index deceba201e29..3de2f7452bde 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -8325,6 +8325,7 @@ CONFIG_KWORKER_NUMA_AFFINITY=y
 # unified bus
 #
 CONFIG_UB=y
+CONFIG_UB_URMA=m
 CONFIG_UB_UBL=m
 # Basic UB bus code compiled into the kernel
 CONFIG_UB_UBUS=y
diff --git a/drivers/ub/Kconfig b/drivers/ub/Kconfig
index 9755ce29deea..117be6efe4e3 100644
--- a/drivers/ub/Kconfig
+++ b/drivers/ub/Kconfig
@@ -3,17 +3,29 @@
 # UnifiedBus configuration
 #
 
-menuconfig UB
-	bool "UB (UnifiedBus) support"
-	depends on ARM64
-	default n
-	help
-	  Support for UB.
-	  If you have a hardware that support UB protocol,
-	  Say y here. By Default this option is closed.
-
-if UB
 source "drivers/ub/ubus/Kconfig"
 source "drivers/ub/ubfi/Kconfig"
 source "drivers/ub/ubase/Kconfig"
+menuconfig UB
+	tristate "Unified Bus (UB) core support"
+	depends on ARM64 || X86_64
+	default n
+	help
+	  Core support for Unified Bus (UB).
+	  If you have hardware that supports the UB protocol,
+	  say M here. This option is disabled by default.
+	  To compile the UB core as a module, choose M here.
+
+if UB
+
+config UB_URMA
+	tristate "Unified Bus (UB) URMA support"
+	default m
+	help
+	  Unified Remote Memory Access (URMA) support. This is the
+	  kernel side of the userspace URMA support, which allows
+	  userspace processes to send and receive URMA commands.
+	  You will also need liburma, which is shipped as part
+	  of UMDK.
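A side note for readers, not part of the patch: because UB_URMA is tristate, kernel code outside drivers/ub/urma/ that needs to react to the option (built in, modular, or absent) would normally test it with the standard Kconfig helper macros. The fragment below is only an illustration of how the new symbol is consumed; the consumer file and both checks are hypothetical.

#include <linux/kconfig.h>

/* Hypothetical consumer of the new option (illustration only). */
#if IS_ENABLED(CONFIG_UB_URMA)
/* =y or =m: the URMA sub-drivers under drivers/ub/urma/ are built. */
#endif

#if IS_MODULE(CONFIG_UB_URMA)
/* =m only: the urma/ sub-drivers (ubagg and friends) are loadable modules. */
#endif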
+
 endif # UB
diff --git a/drivers/ub/Makefile b/drivers/ub/Makefile
index 72eead96aa95..33bae06c8f04 100644
--- a/drivers/ub/Makefile
+++ b/drivers/ub/Makefile
@@ -3,3 +3,4 @@
 obj-y += ubus/
 obj-y += ubfi/
 obj-$(CONFIG_UB_UBASE) += ubase/
+obj-$(CONFIG_UB_URMA) += urma/
diff --git a/drivers/ub/urma/Makefile b/drivers/ub/urma/Makefile
new file mode 100644
index 000000000000..d22473e1f357
--- /dev/null
+++ b/drivers/ub/urma/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux kernel UB device drivers.
+#
+
+
+obj-$(CONFIG_UB_URMA) += ubagg/
+obj-$(CONFIG_UB_URMA) += ubcore/
+obj-$(CONFIG_UB_URMA) += uburma/
diff --git a/drivers/ub/urma/ubagg/Makefile b/drivers/ub/urma/ubagg/Makefile
new file mode 100644
index 000000000000..4163765db4c6
--- /dev/null
+++ b/drivers/ub/urma/ubagg/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux kernel UB device drivers.
+#
+
+ccflags-y += -I$(src)
+
+ubagg-objs := ubagg_bitmap.o \
+	ubagg_hash_table.o \
+	ubagg_jetty.o \
+	ubagg_log.o \
+	ubagg_main.o \
+	ubagg_seg.o \
+	ubagg_topo_info.o \
+	ubagg_ioctl.o
+
+obj-$(CONFIG_UB_URMA) += ubagg.o
diff --git a/drivers/ub/urma/ubagg/ubagg_bitmap.c b/drivers/ub/urma/ubagg/ubagg_bitmap.c
new file mode 100644
index 000000000000..6a8a877bdf48
--- /dev/null
+++ b/drivers/ub/urma/ubagg/ubagg_bitmap.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ * + * Description: ubagg kernel module + * Author: Weicheng Zhang + * Create: 2025-8-6 + * Note: + * History: 2025-8-6: Create file + */ + +#include "ubagg_bitmap.h" +#include "ubagg_log.h" + +struct ubagg_bitmap *ubagg_bitmap_alloc(uint32_t bitmap_size) +{ + struct ubagg_bitmap *bitmap; + + bitmap = kcalloc(1, sizeof(struct ubagg_bitmap), GFP_KERNEL); + if (bitmap == NULL) + return NULL; + bitmap->size = bitmap_size; + bitmap->bits = kcalloc(BITS_TO_LONGS(bitmap_size), + sizeof(unsigned long), GFP_KERNEL); + if (bitmap->bits == NULL) { + kfree(bitmap); + return NULL; + } + bitmap->alloc_idx = 0; + spin_lock_init(&bitmap->lock); + return bitmap; +} + +void ubagg_bitmap_free(struct ubagg_bitmap *bitmap) +{ + spin_lock(&bitmap->lock); + if (bitmap->bits != NULL) + kfree(bitmap->bits); + spin_unlock(&bitmap->lock); + kfree(bitmap); + bitmap = NULL; +} + +int ubagg_bitmap_alloc_idx_from_offset(struct ubagg_bitmap *bitmap, int offset) +{ + int idx; + + if (bitmap == NULL) { + ubagg_log_err("bitmap NULL"); + return -1; + } + spin_lock(&bitmap->lock); + idx = (int)find_next_zero_bit(bitmap->bits, bitmap->size, offset); + if (idx >= bitmap->size || idx < 0) { + spin_unlock(&bitmap->lock); + ubagg_log_err("bitmap allocation failed.\n"); + return -1; + } + + set_bit(idx, bitmap->bits); + spin_unlock(&bitmap->lock); + ubagg_log_info("bitmap allocation success., idx = %d\n", idx); + return idx; +} + +int ubagg_bitmap_alloc_idx_from_offset_nolock(struct ubagg_bitmap *bitmap, + uint64_t offset) +{ + int idx; + + if (bitmap == NULL) { + ubagg_log_err("bitmap NULL"); + return -1; + } + idx = (int)find_next_zero_bit(bitmap->bits, bitmap->size, offset); + if (idx >= bitmap->size || idx < 0) { + ubagg_log_err("bitmap allocation failed.\n"); + return -1; + } + + set_bit(idx, bitmap->bits); + ubagg_log_info("bitmap allocation success., idx = %d\n", idx); + return idx; +} + +int ubagg_bitmap_alloc_idx(struct ubagg_bitmap *bitmap) +{ + int idx; + + if (bitmap == NULL) { + ubagg_log_err("bitmap NULL"); + return -1; + } + spin_lock(&bitmap->lock); + idx = (int)find_first_zero_bit(bitmap->bits, bitmap->size); + if (idx >= bitmap->size || idx < 0) { + spin_unlock(&bitmap->lock); + ubagg_log_err("bitmap allocation failed.\n"); + return -1; + } + set_bit(idx, bitmap->bits); + spin_unlock(&bitmap->lock); + ubagg_log_info("bitmap allocation success., idx = %d\n", idx); + return idx; +} + +int ubagg_bitmap_use_id(struct ubagg_bitmap *bitmap, uint32_t id) +{ + spin_lock(&bitmap->lock); + if (test_bit(id, bitmap->bits) != 0) { + spin_unlock(&bitmap->lock); + ubagg_log_err("Bit %u is already taken.\n", id); + return -1; + } + set_bit(id, bitmap->bits); + spin_unlock(&bitmap->lock); + return 0; +} + +int ubagg_bitmap_free_idx(struct ubagg_bitmap *bitmap, int idx) +{ + spin_lock(&bitmap->lock); + if (idx < 0) { + spin_unlock(&bitmap->lock); + ubagg_log_err("idx invalid, idx:%d.\n", idx); + return -EINVAL; + } + if (test_bit(idx, bitmap->bits) == 0) { + spin_unlock(&bitmap->lock); + ubagg_log_err("idx not set: %d.\n", idx); + return -EINVAL; + } + clear_bit(idx, bitmap->bits); + spin_unlock(&bitmap->lock); + return 0; +} diff --git a/drivers/ub/urma/ubagg/ubagg_bitmap.h b/drivers/ub/urma/ubagg/ubagg_bitmap.h new file mode 100644 index 000000000000..29f3d282218e --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_bitmap.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubagg kernel module + * Author: Weicheng Zhang + * Create: 2025-8-6 + * Note: + * History: 2025-8-6: Create file + */ + +#ifndef UBAGG_BITMAP_H +#define UBAGG_BITMAP_H + +#include +#include "ubagg_types.h" + +struct ubagg_bitmap { + unsigned long *bits; + uint32_t size; + spinlock_t lock; + uint64_t alloc_idx; /* Allocated index */ +}; + +#define UBAGG_BITMAP_MAX_SIZE (1 << 16) + +struct ubagg_bitmap *ubagg_bitmap_alloc(uint32_t bitmap_size); + +void ubagg_bitmap_free(struct ubagg_bitmap *bitmap); + +int ubagg_bitmap_alloc_idx(struct ubagg_bitmap *bitmap); + +int ubagg_bitmap_use_id(struct ubagg_bitmap *bitmap, uint32_t id); + +int ubagg_bitmap_free_idx(struct ubagg_bitmap *bitmap, int idx); + +int ubagg_bitmap_alloc_idx_from_offset(struct ubagg_bitmap *bitmap, int offset); + +int ubagg_bitmap_alloc_idx_from_offset_nolock(struct ubagg_bitmap *bitmap, + uint64_t offset); + +#endif diff --git a/drivers/ub/urma/ubagg/ubagg_hash_table.c b/drivers/ub/urma/ubagg/ubagg_hash_table.c new file mode 100644 index 000000000000..08f2854252ac --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_hash_table.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: implement hash table ops + * Author: Yan Fangfang + * Create: 2022-08-03 + * Note: + * History: 2022-08-03 Yan Fangfang Add base code + */ + +#include +#include "ubagg_hash_table.h" + +int ubagg_hash_table_alloc(struct ubagg_hash_table *ht, + struct ubagg_ht_param *p) +{ + uint32_t i; + + if (p == NULL || p->size == 0) + return -1; + + ht->head = kcalloc(p->size, sizeof(struct hlist_head), GFP_KERNEL); + if (ht->head == NULL) + return -ENOMEM; + + ht->p = *p; + for (i = 0; i < p->size; i++) + INIT_HLIST_HEAD(&ht->head[i]); + + spin_lock_init(&ht->lock); + kref_init(&ht->kref); + return 0; +} + +void ubagg_hash_table_free_with_cb(struct ubagg_hash_table *ht, + void (*free_cb)(void *)) +{ + struct hlist_node *pos = NULL, *next = NULL; + struct hlist_head *head; + uint32_t i; + void *obj; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return; + } + for (i = 0; i < ht->p.size; i++) { + hlist_for_each_safe(pos, next, &ht->head[i]) { + obj = ubagg_ht_obj(ht, pos); + hlist_del(pos); + spin_unlock(&ht->lock); + if (free_cb != NULL) + free_cb(obj); + else + kfree(obj); + spin_lock(&ht->lock); + } + } + head = ht->head; + ht->head = NULL; + spin_unlock(&ht->lock); + if (head != NULL) + kfree(head); +} + +void ubagg_hash_table_free(struct ubagg_hash_table *ht) +{ + ubagg_hash_table_free_with_cb(ht, NULL); +} + +void ubagg_hash_table_add_nolock(struct ubagg_hash_table *ht, + struct hlist_node *hnode, uint32_t hash) +{ + INIT_HLIST_NODE(hnode); + hlist_add_head(hnode, &ht->head[hash % ht->p.size]); +} + +void ubagg_hash_table_add(struct ubagg_hash_table *ht, struct hlist_node *hnode, + uint32_t hash) +{ + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return; + } + ubagg_hash_table_add_nolock(ht, hnode, hash); + spin_unlock(&ht->lock); +} + +void ubagg_hash_table_remove_nolock(struct ubagg_hash_table *ht, + struct hlist_node *hnode) +{ + if (ht->head == NULL) + return; + + hlist_del_init(hnode); +} + +void ubagg_hash_table_remove(struct ubagg_hash_table *ht, + struct hlist_node *hnode) +{ + spin_lock(&ht->lock); + ubagg_hash_table_remove_nolock(ht, hnode); + spin_unlock(&ht->lock); +} + +int ubagg_hash_table_check_remove(struct ubagg_hash_table *ht, + struct hlist_node *hnode) +{ + spin_lock(&ht->lock); + if (hlist_unhashed(hnode)) { + spin_unlock(&ht->lock); + return -EINVAL; + } + ubagg_hash_table_remove_nolock(ht, hnode); + spin_unlock(&ht->lock); + return 0; +} + +void *ubagg_hash_table_lookup_nolock_get(struct ubagg_hash_table *ht, + uint32_t hash, const void *key) +{ + struct hlist_node *pos = NULL; + void *obj = NULL; + + hlist_for_each(pos, &ht->head[hash % ht->p.size]) { + obj = ubagg_ht_obj(ht, pos); + if (ht->p.key_size > 0 && + memcmp(ubagg_ht_key(ht, pos), key, ht->p.key_size) == 0) { + break; + } + obj = NULL; + } + + return obj; +} + +void *ubagg_hash_table_lookup_get(struct ubagg_hash_table *ht, uint32_t hash, + const void *key) +{ + void *obj = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + obj = ubagg_hash_table_lookup_nolock_get(ht, hash, key); + + spin_unlock(&ht->lock); + return obj; +} + +void *ubagg_hash_table_lookup_nolock(struct ubagg_hash_table *ht, uint32_t hash, + const void *key) +{ + struct hlist_node *pos = NULL; + void *obj = NULL; + + hlist_for_each(pos, &ht->head[hash % ht->p.size]) { + obj = ubagg_ht_obj(ht, pos); + if (ht->p.key_size > 0 && + memcmp(ubagg_ht_key(ht, pos), key, ht->p.key_size) == 0) { + break; + } + obj = NULL; + 
} + return obj; +} + +void *ubagg_hash_table_lookup(struct ubagg_hash_table *ht, uint32_t hash, + const void *key) +{ + void *obj = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + obj = ubagg_hash_table_lookup_nolock(ht, hash, key); + spin_unlock(&ht->lock); + return obj; +} + +/* Do not insert a new entry if an old entry with the same key exists */ +int ubagg_hash_table_find_add(struct ubagg_hash_table *ht, + struct hlist_node *hnode, uint32_t hash) +{ + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return -EINVAL; + } + /* Old entry with the same key exists */ + if (ubagg_hash_table_lookup_nolock(ht, hash, ubagg_ht_key(ht, hnode)) != + NULL) { + spin_unlock(&ht->lock); + return -EEXIST; + } + ubagg_hash_table_add_nolock(ht, hnode, hash); + spin_unlock(&ht->lock); + return 0; +} + +void *ubagg_hash_table_find_remove(struct ubagg_hash_table *ht, uint32_t hash, + const void *key) +{ + struct hlist_node *pos = NULL, *next = NULL; + void *obj = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + hlist_for_each_safe(pos, next, &ht->head[hash % ht->p.size]) { + obj = ubagg_ht_obj(ht, pos); + if (ht->p.key_size > 0 && + memcmp(ubagg_ht_key(ht, pos), key, ht->p.key_size) == 0) { + hlist_del(pos); + break; + } + obj = NULL; + } + spin_unlock(&ht->lock); + return obj; +} diff --git a/drivers/ub/urma/ubagg/ubagg_hash_table.h b/drivers/ub/urma/ubagg/ubagg_hash_table.h new file mode 100644 index 000000000000..981d4cc9f79a --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_hash_table.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: define hash table ops + * Author: Yan Fangfang + * Create: 2022-08-03 + * Note: + * History: 2022-08-03 Yan Fangfang Add base code + */ + +#ifndef UBAGG_HASH_TABLE_H +#define UBAGG_HASH_TABLE_H + +#include "ubagg_types.h" + +static inline void *ubagg_ht_obj(const struct ubagg_hash_table *ht, + const struct hlist_node *hnode) +{ + return (char *)hnode - ht->p.node_offset; +} + +static inline void *ubagg_ht_key(const struct ubagg_hash_table *ht, + const struct hlist_node *hnode) +{ + return ((char *)hnode - ht->p.node_offset) + ht->p.key_offset; +} + +/* Init ht head, not calloc hash table itself */ +int ubagg_hash_table_alloc(struct ubagg_hash_table *ht, + struct ubagg_ht_param *p); +/* Free ht head, not release hash table itself */ +void ubagg_hash_table_free(struct ubagg_hash_table *ht); + +void ubagg_hash_table_free_with_cb(struct ubagg_hash_table *ht, + void (*free_cb)(void *)); + +void ubagg_hash_table_add(struct ubagg_hash_table *ht, struct hlist_node *hnode, + uint32_t hash); + +void ubagg_hash_table_add_nolock(struct ubagg_hash_table *ht, + struct hlist_node *hnode, uint32_t hash); + +void ubagg_hash_table_remove(struct ubagg_hash_table *ht, + struct hlist_node *hnode); + +int ubagg_hash_table_check_remove(struct ubagg_hash_table *ht, + struct hlist_node *hnode); + +void ubagg_hash_table_remove_nolock(struct ubagg_hash_table *ht, + struct hlist_node *hnode); + +void *ubagg_hash_table_lookup(struct ubagg_hash_table *ht, uint32_t hash, + const void *key); + +void *ubagg_hash_table_lookup_nolock(struct ubagg_hash_table *ht, uint32_t hash, + const void *key); + +void *ubagg_hash_table_lookup_get(struct ubagg_hash_table *ht, uint32_t hash, + const void *key); + +void *ubagg_hash_table_lookup_nolock_get(struct ubagg_hash_table *ht, + uint32_t hash, const void *key); + +void *ubagg_hash_table_find_remove(struct ubagg_hash_table *ht, uint32_t hash, + const void *key); +/* Do not insert a new entry if an old entry with the same key exists */ +int ubagg_hash_table_find_add(struct ubagg_hash_table *ht, + struct hlist_node *hnode, uint32_t hash); +#endif diff --git a/drivers/ub/urma/ubagg/ubagg_ioctl.c b/drivers/ub/urma/ubagg/ubagg_ioctl.c new file mode 100644 index 000000000000..a51f0f2ab655 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_ioctl.c @@ -0,0 +1,1826 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubagg kernel module + * Author: Dongxu Li + * Create: 2025-1-14 + * Note: + * History: 2025-1-14: Create file + */ +#include +#include +#include +#include + +#include +#include +#include +#include "ubagg_log.h" +#include "ubagg_ioctl.h" +#include "ubagg_jetty.h" +#include "ubagg_seg.h" +#include "ubagg_bitmap.h" +#include "ubagg_hash_table.h" + +#define UBAGG_DEVICE_MAX_EID_CNT 128 +#define UBAGG_MAX_BONDING_DEV_NUM 256 +#define UBAGG_DEV_NAME_PREFIX "bonding_dev_" +#define MAX_NUM_LEN 11 +#define BITMAP_OFFSET 1025 +#define BASE_DECIMAL 10 + +static LIST_HEAD(g_ubagg_dev_list); +static DEFINE_SPINLOCK(g_ubagg_dev_list_lock); + +struct seg_info_req { + struct ubcore_ubva ubva; + uint64_t len; + uint32_t token_id; +}; + +struct jetty_info_req { + struct ubcore_jetty_id jetty_id; + bool is_jfr; +}; + +static struct ubagg_ht_param g_ubagg_ht_params[] = { + [UBAGG_HT_SEGMENT_HT] = { UBAGG_BITMAP_SIZE, + sizeof(struct ubagg_seg_hash_node) - + sizeof(struct hlist_node), + sizeof(struct ubcore_target_seg), + sizeof(uint32_t) }, + [UBAGG_HT_JETTY_HT] = { UBAGG_BITMAP_SIZE, + sizeof(struct ubagg_jetty_hash_node) - + sizeof(struct hlist_node), + sizeof(struct ubcore_jetty), sizeof(uint32_t) }, + [UBAGG_HT_JFR_HT] = { UBAGG_BITMAP_SIZE, + sizeof(struct ubagg_jfr_hash_node) - + sizeof(struct hlist_node), + sizeof(struct ubcore_jfr), sizeof(uint32_t) }, +}; + +static void ubagg_dev_release(struct kref *kref) +{ + struct ubagg_device *dev = container_of(kref, struct ubagg_device, ref); + + kfree(dev); +} + +void ubagg_dev_ref_get(struct ubagg_device *dev) +{ + kref_get(&dev->ref); +} + +void ubagg_dev_ref_put(struct ubagg_device *dev) +{ + kref_put(&dev->ref, ubagg_dev_release); +} + +struct ubagg_dev_name_eid_arr { + char master_dev_name[UBAGG_MAX_DEV_NAME_LEN]; + char bonding_eid[EID_LEN]; +}; +static struct ubagg_dev_name_eid_arr + g_name_eid_arr[UBAGG_MAX_BONDING_DEV_NUM] = { 0 }; +static DEFINE_MUTEX(g_name_eid_arr_lock); + +static bool g_device_id_has_use[UBAGG_MAX_BONDING_DEV_NUM] = { 0 }; +static DEFINE_MUTEX(g_device_id_lock); + +static int find_bond_device_id(void) +{ + int use_id, i; + + mutex_lock(&g_device_id_lock); + for (i = 0; i < UBAGG_MAX_BONDING_DEV_NUM; i++) { + if (g_device_id_has_use[i] == false) { + use_id = i; + g_device_id_has_use[i] = true; + break; + } + } + mutex_unlock(&g_device_id_lock); + if (i == UBAGG_MAX_BONDING_DEV_NUM) { + ubagg_log_err("no free device id.\n"); + return -1; + } + return use_id; +} + +static void release_bond_device_id(int id) +{ + mutex_lock(&g_device_id_lock); + g_device_id_has_use[id] = false; + mutex_unlock(&g_device_id_lock); +} + +static int release_bond_device_id_with_name(const char *str) +{ + const char *underscore_pos; + int id; + int ret; + + if (!str) { + ubagg_log_err("name str is null\n"); + return -EINVAL; + } + + underscore_pos = strrchr(str, '_'); + if (!underscore_pos) { + ubagg_log_err("invalid dev name: %s\n", str); + return -EINVAL; + } + if (underscore_pos[1] == '\0') { + ubagg_log_err("dev name is invalid\n"); + return -EINVAL; + } + ret = kstrtoint(underscore_pos + 1, BASE_DECIMAL, &id); + if (ret) { + ubagg_log_err("str to int failed\n"); + return ret; + } + release_bond_device_id(id); + return 0; +} + +static char *generate_master_dev_name(void) +{ + char *name = NULL; + int cur_id; + int max_length; + + cur_id = find_bond_device_id(); + if (cur_id < 0) { + ubagg_log_err("no free device id.\n"); + return NULL; + } + + max_length = strlen(UBAGG_DEV_NAME_PREFIX) + MAX_NUM_LEN; + name = 
kmalloc_array(max_length, sizeof(char), GFP_KERNEL); + if (name == NULL) { + release_bond_device_id(cur_id); + ubagg_log_err("malloc master dev name failed.\n"); + return NULL; + } + (void)snprintf(name, max_length, "%s%d", UBAGG_DEV_NAME_PREFIX, cur_id); + return name; +} + +static bool ubagg_dev_exists(char *dev_name) +{ + struct ubagg_device *dev; + + list_for_each_entry(dev, &g_ubagg_dev_list, list_node) { + if (strncmp(dev_name, dev->master_dev_name, + UBAGG_MAX_DEV_NAME_LEN) == 0) + return true; + } + return false; +} + +static struct ubagg_device *ubagg_find_dev_by_name(char *dev_name) +{ + struct ubagg_device *dev; + unsigned long flags; + + spin_lock_irqsave(&g_ubagg_dev_list_lock, flags); + list_for_each_entry(dev, &g_ubagg_dev_list, list_node) { + if (strncmp(dev_name, dev->master_dev_name, + UBAGG_MAX_DEV_NAME_LEN) == 0) { + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + return dev; + } + } + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + return NULL; +} + +static struct ubagg_device * +ubagg_find_dev_by_name_and_rmv_from_list(char *dev_name) +{ + struct ubagg_device *dev, *target = NULL; + unsigned long flags; + + spin_lock_irqsave(&g_ubagg_dev_list_lock, flags); + list_for_each_entry(dev, &g_ubagg_dev_list, list_node) { + if (strncmp(dev_name, dev->master_dev_name, + UBAGG_MAX_DEV_NAME_LEN) == 0) { + target = dev; + list_del(&dev->list_node); + ubagg_dev_ref_put(dev); + break; + } + } + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + return target; +} + +static bool get_slave_dev(char *dev_name, struct ubagg_slave_device *slave_dev) +{ + struct ubagg_device *ubagg_dev = ubagg_find_dev_by_name(dev_name); + int i; + + if (ubagg_dev == NULL) { + ubagg_log_err("aggregation device not exist."); + return false; + } + + slave_dev->slave_dev_num = ubagg_dev->slave_dev_num; + for (i = 0; i < ubagg_dev->slave_dev_num; i++) + (void)memcpy(slave_dev->slave_dev_name[i], + ubagg_dev->slave_dev_name[i], + UBAGG_MAX_DEV_NAME_LEN); + return true; +} + +static int ubagg_get_slave_device(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl) +{ + struct ubagg_slave_device slave_dev = { 0 }; + int ret; + + if (!get_slave_dev(dev->dev_name, &slave_dev)) { + ubagg_log_err("ubagg dev not exist:%s", dev->dev_name); + return -ENXIO; + } + + if (user_ctl->out.len < sizeof(struct ubagg_slave_device)) { + ubagg_log_err( + "ubagg user ctl has no enough space, buffer size:%u, needed size:%lu", + user_ctl->out.len, sizeof(struct ubagg_slave_device)); + return -ENOSPC; + } + + ret = copy_to_user((void __user *)user_ctl->out.addr, + (void *)&slave_dev, sizeof(slave_dev)); + if (ret != 0) { + ubagg_log_err("copy to user fail, ret:%d", ret); + return -EFAULT; + } + return 0; +} + +static struct ubagg_topo_info_out *get_topo_info(void) +{ + struct ubagg_topo_info_out *out = NULL; + struct ubagg_topo_map *topo_map = NULL; + + topo_map = get_global_ubagg_map(); + if (topo_map == NULL) + return NULL; + out = kzalloc(sizeof(struct ubagg_topo_info_out), GFP_KERNEL); + if (out == NULL) + return NULL; + (void)memcpy(out->topo_info, topo_map->topo_infos, + sizeof(topo_map->topo_infos)); + out->node_num = topo_map->node_num; + return out; +} + +static int ubagg_get_topo_info(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl) +{ + struct ubagg_topo_info_out *topo_info_out = NULL; + int ret; + + topo_info_out = get_topo_info(); + if (!topo_info_out) { + ubagg_log_err("ubagg dev topo info does not exist:%s", + dev->dev_name); + return -ENXIO; + } + + if (user_ctl->out.len < 
sizeof(struct ubagg_topo_info_out)) { + ubagg_log_err( + "ubagg user ctl has no enough space, buffer size:%u, needed size:%lu", + user_ctl->out.len, sizeof(struct ubagg_topo_info_out)); + kfree(topo_info_out); + return -ENOSPC; + } + + ret = copy_to_user((void __user *)user_ctl->out.addr, + (void *)topo_info_out, + sizeof(struct ubagg_topo_info_out)); + if (ret != 0) { + ubagg_log_err("copy to user fail, ret:%d", ret); + kfree(topo_info_out); + return -EFAULT; + } + kfree(topo_info_out); + return 0; +} + +static int ubagg_get_jfr_id(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl) +{ + struct ubagg_device *ubagg_dev = to_ubagg_dev(dev); + uint32_t id; + int ret; + + if ((ubagg_dev == NULL) || (ubagg_dev->jfr_bitmap == NULL)) { + ubagg_log_err("ubagg_dev->jfr_bitmap NULL"); + return -1; + } + id = ubagg_bitmap_alloc_idx(ubagg_dev->jfr_bitmap); + ret = copy_to_user((void __user *)user_ctl->out.addr, (void *)&id, + sizeof(uint32_t)); + if (ret != 0) { + ubagg_log_err("copy to user fail, ret:%d", ret); + return -EFAULT; + } + return ret; +} + +static int ubagg_get_jetty_id(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl) +{ + struct ubagg_device *ubagg_dev = to_ubagg_dev(dev); + uint32_t id; + int ret; + + if ((ubagg_dev == NULL) || (ubagg_dev->jetty_bitmap == NULL)) { + ubagg_log_err("ubagg_dev->jfr_bitmap NULL"); + return -1; + } + id = ubagg_bitmap_alloc_idx(ubagg_dev->jetty_bitmap); + ret = copy_to_user((void __user *)user_ctl->out.addr, (void *)&id, + sizeof(uint32_t)); + if (ret != 0) { + ubagg_log_err("copy to user fail, ret:%d", ret); + return -EFAULT; + } + return ret; +} + +static int ubagg_get_seg_info(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl) +{ + struct ubagg_device *ubagg_dev = to_ubagg_dev(dev); + struct ubagg_hash_table *ubagg_seg_ht = NULL; + struct ubagg_seg_hash_node *tmp_seg = NULL; + struct seg_info_req *req = NULL; + + if ((ubagg_dev == NULL) || (ubagg_dev->segment_bitmap == NULL)) { + ubagg_log_err("ubagg_dev->segment_bitmap NULL"); + return -1; + } + + if (user_ctl->in.addr != 0 && + user_ctl->in.len != sizeof(struct seg_info_req)) { + ubagg_log_err("Invalid user in"); + return -1; + } + req = (struct seg_info_req *)user_ctl->in.addr; + + ubagg_seg_ht = &ubagg_dev->ubagg_ht[UBAGG_HT_SEGMENT_HT]; + spin_lock(&ubagg_seg_ht->lock); + tmp_seg = ubagg_hash_table_lookup_nolock(ubagg_seg_ht, req->token_id, + &req->token_id); + if (tmp_seg == NULL) { + spin_unlock(&ubagg_seg_ht->lock); + ubagg_log_err("Failed to find seg.\n"); + return -1; + } + memcpy((void *)user_ctl->out.addr, tmp_seg->ex_info.slaves, + sizeof(tmp_seg->ex_info.slaves)); + spin_unlock(&ubagg_seg_ht->lock); + return 0; +} + +static int ubagg_get_jetty_info(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl) +{ + struct ubagg_hash_table *ht = NULL; + struct ubagg_device *ubagg_dev = to_ubagg_dev(dev); + struct jetty_info_req *req = NULL; + + if ((ubagg_dev == NULL) || (ubagg_dev->segment_bitmap == NULL)) { + ubagg_log_err("ubagg_dev->segment_bitmap NULL"); + return -1; + } + + if (user_ctl->in.addr != 0 && + user_ctl->in.len != sizeof(struct jetty_info_req)) { + ubagg_log_err("Invalid user in"); + return -1; + } + req = (struct jetty_info_req *)user_ctl->in.addr; + + if (req->is_jfr) { + struct ubagg_jfr_hash_node *tmp_jfr = NULL; + + ht = &ubagg_dev->ubagg_ht[UBAGG_HT_JFR_HT]; + spin_lock(&ht->lock); + tmp_jfr = ubagg_hash_table_lookup_nolock(ht, req->jetty_id.id, + &req->jetty_id.id); + if (tmp_jfr == NULL) { + spin_unlock(&ht->lock); + 
ubagg_log_err("Failed to find jfr, jetty_id:%u.\n", + req->jetty_id.id); + return -1; + } + memcpy((void *)user_ctl->out.addr, &tmp_jfr->ex_info, + sizeof(tmp_jfr->ex_info)); + spin_unlock(&ht->lock); + } else { + struct ubagg_jetty_hash_node *tmp_jetty = NULL; + + ht = &ubagg_dev->ubagg_ht[UBAGG_HT_JETTY_HT]; + spin_lock(&ht->lock); + tmp_jetty = ubagg_hash_table_lookup_nolock(ht, req->jetty_id.id, + &req->jetty_id.id); + if (tmp_jetty == NULL) { + spin_unlock(&ht->lock); + ubagg_log_err("Failed to find jetty, jetty_id:%u.\n", + req->jetty_id.id); + return -1; + } + memcpy((void *)user_ctl->out.addr, &tmp_jetty->ex_info, + sizeof(tmp_jetty->ex_info)); + spin_unlock(&ht->lock); + } + return 0; +} + +int ubagg_user_ctl(struct ubcore_device *dev, struct ubcore_user_ctl *user_ctl) +{ + int ret = 0; + + if (dev == NULL || user_ctl == NULL) { + ubagg_log_err("Invalid parameter.\n"); + return -1; + } + + switch (user_ctl->in.opcode) { + case GET_SLAVE_DEVICE: { + ret = ubagg_get_slave_device(dev, user_ctl); + break; + } + case GET_TOPO_INFO: { + ret = ubagg_get_topo_info(dev, user_ctl); + break; + } + case GET_JFR_ID: { + ret = ubagg_get_jfr_id(dev, user_ctl); + break; + } + case GET_JETTY_ID: { + ret = ubagg_get_jetty_id(dev, user_ctl); + break; + } + case GET_SEG_INFO: { + ret = ubagg_get_seg_info(dev, user_ctl); + break; + } + case GET_JETTY_INFO: { + ret = ubagg_get_jetty_info(dev, user_ctl); + break; + } + default: { + ubagg_log_err("unsupported ubagg userctl opcde:%u", + user_ctl->in.opcode); + ret = -ENXIO; + } + } + + return ret; +} + +int ubagg_config_device(struct ubcore_device *dev, + struct ubcore_device_cfg *cfg) +{ + (void)dev; + (void)cfg; + return 0; +} + +static struct ubcore_ucontext * +ubagg_alloc_ucontext(struct ubcore_device *dev, uint32_t eid_index, + struct ubcore_udrv_priv *udrv_data) +{ + (void)dev; + (void)eid_index; + (void)udrv_data; + return kzalloc(sizeof(struct ubcore_ucontext), GFP_KERNEL); +} + +static int ubagg_free_ucontext(struct ubcore_ucontext *uctx) +{ + kfree(uctx); + return 0; +} + +static int ubagg_query_device_attr(struct ubcore_device *dev, + struct ubcore_device_attr *attr) +{ + *attr = dev->attr; + return 0; +} + +struct ubcore_jfc *ubagg_create_jfc(struct ubcore_device *ub_dev, + struct ubcore_jfc_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_device *ubagg_dev = + ubagg_container_of(ub_dev, struct ubagg_device, ub_dev); + struct ubagg_jfc *jfc; + int id; + + if (ubagg_dev == NULL || ub_dev == NULL || cfg == NULL || + udata == NULL || udata->uctx == NULL) + return NULL; + + jfc = kzalloc(sizeof(struct ubagg_jfc), GFP_KERNEL); + if (jfc == NULL) + return NULL; + jfc->base.jfc_cfg.depth = cfg->depth; + spin_lock(&ubagg_dev->jfc_bitmap->lock); + id = ubagg_bitmap_alloc_idx_from_offset_nolock( + ubagg_dev->jfc_bitmap, ubagg_dev->jfc_bitmap->alloc_idx); + if (id == -1) { + spin_unlock(&ubagg_dev->jfc_bitmap->lock); + ubagg_log_err("failed to alloc jfc_id"); + kfree(jfc); + return NULL; + } + + jfc->base.id = id; + ubagg_dev->jfc_bitmap->alloc_idx = + (jfc->base.id + 1) % UBAGG_BITMAP_MAX_SIZE; + spin_unlock(&ubagg_dev->jfc_bitmap->lock); + ubagg_log_info("ubagg jfc created successfully, id: %u.\n", + jfc->base.id); + return &jfc->base; +} + +int ubagg_destroy_jfc(struct ubcore_jfc *jfc) +{ + struct ubagg_device *ubagg_dev; + struct ubagg_jfc *ubagg_jfc; + int id; + + if (jfc == NULL || jfc->ub_dev == NULL) + return -EINVAL; + ubagg_dev = (struct ubagg_device *)jfc->ub_dev; + ubagg_jfc = ubagg_container_of(jfc, struct ubagg_jfc, base); + + id = 
jfc->id; + (void)ubagg_bitmap_free_idx(ubagg_dev->jfc_bitmap, id); + kfree(ubagg_jfc); + ubagg_log_info("ubagg jfc destroyed successfully, id: %u.\n", id); + return 0; +} + +struct ubcore_jfs *ubagg_create_jfs(struct ubcore_device *ub_dev, + struct ubcore_jfs_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_device *ubagg_dev = + ubagg_container_of(ub_dev, struct ubagg_device, ub_dev); + struct ubagg_jfs *jfs; + int id; + + if (ub_dev == NULL || cfg == NULL || udata == NULL || + udata->uctx == NULL) + return NULL; + spin_lock(&ubagg_dev->jfs_bitmap->lock); + id = ubagg_bitmap_alloc_idx_from_offset_nolock( + ubagg_dev->jfs_bitmap, ubagg_dev->jfs_bitmap->alloc_idx); + if (id == -1) { + spin_unlock(&ubagg_dev->jfs_bitmap->lock); + ubagg_log_err("failed to alloc jfs_id: id has been used up.\n"); + return NULL; + } + ubagg_dev->jfs_bitmap->alloc_idx = (id + 1) % UBAGG_BITMAP_MAX_SIZE; + spin_unlock(&ubagg_dev->jfs_bitmap->lock); + + jfs = kzalloc(sizeof(struct ubagg_jfs), GFP_KERNEL); + if (IS_ERR_OR_NULL(jfs)) { + (void)ubagg_bitmap_free_idx(ubagg_dev->jfs_bitmap, id); + return NULL; + } + + jfs->base.jfs_cfg.depth = cfg->depth; + jfs->base.jfs_cfg.max_sge = cfg->max_sge; + jfs->base.jfs_cfg.max_rsge = cfg->max_rsge; + jfs->base.jfs_cfg.max_inline_data = cfg->max_inline_data; + jfs->base.jfs_cfg.trans_mode = cfg->trans_mode; + jfs->base.jfs_id.id = id; + + ubagg_log_info("ubagg create jfs successfully, id: %u.\n", + jfs->base.jfs_id.id); + return &jfs->base; +} + +int ubagg_destroy_jfs(struct ubcore_jfs *jfs) +{ + struct ubagg_device *ubagg_dev; + struct ubagg_jfs *ubagg_jfs; + int id; + + if (jfs == NULL || jfs->ub_dev == NULL || jfs->uctx == NULL) + return -EINVAL; + ubagg_dev = (struct ubagg_device *)jfs->ub_dev; + ubagg_jfs = ubagg_container_of(jfs, struct ubagg_jfs, base); + id = jfs->jfs_id.id; + (void)ubagg_bitmap_free_idx(ubagg_dev->jfs_bitmap, id); + kfree(ubagg_jfs); + ubagg_log_info("ubagg destroy jfs_ctx successfully, id: %u.\n", id); + return 0; +} + +struct ubcore_jfr *ubagg_create_jfr(struct ubcore_device *ub_dev, + struct ubcore_jfr_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_device *ubagg_dev = + ubagg_container_of(ub_dev, struct ubagg_device, ub_dev); + struct ubagg_hash_table *ubagg_jfr_ht = NULL; + struct ubagg_jfr_hash_node *tmp_jfr = NULL; + struct ubagg_jfr_hash_node *jfr = NULL; + int ret = 0; + int id; + + if (ub_dev == NULL || cfg == NULL || udata == NULL || + udata->uctx == NULL || cfg->id >= UBAGG_BITMAP_MAX_SIZE) + return NULL; + + id = cfg->id; + if (id == 0) { + spin_lock(&ubagg_dev->jfr_bitmap->lock); + id = ubagg_bitmap_alloc_idx_from_offset_nolock( + ubagg_dev->jfr_bitmap, + ubagg_dev->jfr_bitmap->alloc_idx); + if (id == -1) { + spin_unlock(&ubagg_dev->jfr_bitmap->lock); + ubagg_log_err( + "failed to alloc jfr_id: id has been used up.\n"); + return NULL; + } + ubagg_dev->jfr_bitmap->alloc_idx = + (id + 1) % UBAGG_BITMAP_MAX_SIZE == 0 ? 
+ BITMAP_OFFSET : + (id + 1) % UBAGG_BITMAP_MAX_SIZE; + spin_unlock(&ubagg_dev->jfr_bitmap->lock); + } else { + if (ubagg_bitmap_use_id(ubagg_dev->jfr_bitmap, id) != 0) { + ubagg_log_err( + "failed to alloc jfr_id: id has been set.\n"); + return NULL; + } + } + + if (id == -1) { + ubagg_log_err("failed to alloc jfr_id: id has been used up.\n"); + return NULL; + } + + jfr = kzalloc(sizeof(struct ubagg_jfr_hash_node), GFP_KERNEL); + if (jfr == NULL) + goto FREE_ID; + + jfr->base.jfr_cfg.depth = cfg->depth; + jfr->base.jfr_cfg.max_sge = cfg->max_sge; + jfr->base.jfr_id.id = id; + jfr->token_id = id; + + ret = copy_from_user(&jfr->ex_info, + (void __user *)udata->udrv_data->in_addr, + udata->udrv_data->in_len); + if (ret != 0) { + ubagg_log_err("ubagg fail to copy from user, ret:%d.\n", ret); + goto FREE_JFR; + } + jfr->ex_info.base.id = id; + + ubagg_jfr_ht = &ubagg_dev->ubagg_ht[UBAGG_HT_JFR_HT]; + spin_lock(&ubagg_jfr_ht->lock); + tmp_jfr = ubagg_hash_table_lookup_nolock(ubagg_jfr_ht, id, &id); + if (tmp_jfr != NULL) { + ubagg_log_err("id:%u already exists.\n", id); + // should remove it + ubagg_hash_table_remove_nolock(ubagg_jfr_ht, &tmp_jfr->hnode); + spin_unlock(&ubagg_jfr_ht->lock); + kfree(tmp_jfr); + goto FREE_JFR; + } + + ubagg_hash_table_add_nolock(ubagg_jfr_ht, &jfr->hnode, id); + spin_unlock(&ubagg_jfr_ht->lock); + + ubagg_log_info("ubagg create jfr_ctx successfully, id: %u.\n", + jfr->base.jfr_id.id); + return &jfr->base; + +FREE_JFR: + kfree(jfr); +FREE_ID: + (void)ubagg_bitmap_free_idx(ubagg_dev->jfr_bitmap, id); + + ubagg_log_err("ubagg fail to create jfr.\n"); + return NULL; +} + +int ubagg_destroy_jfr(struct ubcore_jfr *jfr) +{ + struct ubagg_device *ubagg_dev; + struct ubagg_jfr_hash_node *ubagg_jfr; + int id; + + if (jfr == NULL || jfr->ub_dev == NULL || jfr->uctx == NULL) + return -EINVAL; + ubagg_dev = (struct ubagg_device *)jfr->ub_dev; + ubagg_jfr = ubagg_container_of(jfr, struct ubagg_jfr_hash_node, base); + id = jfr->jfr_id.id; + ubagg_hash_table_remove(&ubagg_dev->ubagg_ht[UBAGG_HT_JFR_HT], + &ubagg_jfr->hnode); + (void)ubagg_bitmap_free_idx(ubagg_dev->jfr_bitmap, id); + kfree(ubagg_jfr); + ubagg_log_info("ubagg destroy jfr_ctx successfully, id: %u.\n", id); + return 0; +} + +struct ubcore_jetty *ubagg_create_jetty(struct ubcore_device *dev, + struct ubcore_jetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_device *ubagg_dev = + ubagg_container_of(dev, struct ubagg_device, ub_dev); + struct ubagg_hash_table *ubagg_jetty_ht = NULL; + struct ubagg_jetty_hash_node *tmp_jetty = NULL; + struct ubagg_jetty_hash_node *jetty = NULL; + int ret; + int id; + + if (dev == NULL || cfg == NULL || udata == NULL || + cfg->id >= UBAGG_BITMAP_MAX_SIZE) + return NULL; + + id = cfg->id; + if (id == 0) { + spin_lock(&ubagg_dev->jetty_bitmap->lock); + id = ubagg_bitmap_alloc_idx_from_offset_nolock( + ubagg_dev->jetty_bitmap, + ubagg_dev->jetty_bitmap->alloc_idx); + ubagg_log_err("jetty alloc bitmap, idx = %d\n", id); + if (id <= 0) { + spin_unlock(&ubagg_dev->jetty_bitmap->lock); + ubagg_log_err("failed to alloc jetty_id.\n"); + return NULL; + } + ubagg_dev->jetty_bitmap->alloc_idx = + (id + 1) % UBAGG_BITMAP_MAX_SIZE == 0 ? 
+ BITMAP_OFFSET : + (id + 1) % UBAGG_BITMAP_MAX_SIZE; + spin_unlock(&ubagg_dev->jetty_bitmap->lock); + } else { + if (ubagg_bitmap_use_id(ubagg_dev->jetty_bitmap, id) != 0) { + ubagg_log_err( + "failed to alloc jetty_id: id has been set.\n"); + return NULL; + } + } + + if (id == -1) { + ubagg_log_err( + "failed to alloc jetty_id: id has been used up.\n"); + return NULL; + } + + jetty = kzalloc(sizeof(struct ubagg_jetty_hash_node), GFP_KERNEL); + if (jetty == NULL) + goto FREE_ID; + + jetty->base.jetty_cfg = *cfg; + jetty->base.jetty_id.id = id; + jetty->token_id = id; + ret = copy_from_user(&jetty->ex_info, + (void __user *)udata->udrv_data->in_addr, + udata->udrv_data->in_len); + if (ret != 0) { + ubagg_log_err("ubagg fail to copy from user, ret:%d.\n", ret); + goto FREE_JETTY; + } + jetty->ex_info.base.id = id; + + ubagg_jetty_ht = &ubagg_dev->ubagg_ht[UBAGG_HT_JETTY_HT]; + spin_lock(&ubagg_jetty_ht->lock); + tmp_jetty = ubagg_hash_table_lookup_nolock(ubagg_jetty_ht, id, &id); + if (tmp_jetty != NULL) { + ubagg_log_err("id:%u already exists.\n", id); + // should remove it + ubagg_hash_table_remove_nolock(ubagg_jetty_ht, + &tmp_jetty->hnode); + spin_unlock(&ubagg_jetty_ht->lock); + kfree(tmp_jetty); + goto FREE_ID; + } + + ubagg_hash_table_add_nolock(ubagg_jetty_ht, &jetty->hnode, id); + spin_unlock(&ubagg_jetty_ht->lock); + + ubagg_log_info("ubagg create jetty_ctx successfully, jetty_id: %d\n", + jetty->base.jetty_id.id); + return &jetty->base; + +FREE_JETTY: + kfree(jetty); +FREE_ID: + (void)ubagg_bitmap_free_idx(ubagg_dev->jetty_bitmap, id); + + ubagg_log_err("ubagg fail to create jetty_ctx.\n"); + return NULL; +} + +int ubagg_destroy_jetty(struct ubcore_jetty *jetty) +{ + struct ubagg_jetty_hash_node *ubagg_jetty; + struct ubagg_device *ubagg_dev; + int id; + + if (jetty == NULL) + return -EINVAL; + ubagg_dev = (struct ubagg_device *)jetty->ub_dev; + ubagg_jetty = + ubagg_container_of(jetty, struct ubagg_jetty_hash_node, base); + id = jetty->jetty_id.id; + ubagg_hash_table_remove(&ubagg_dev->ubagg_ht[UBAGG_HT_JETTY_HT], + &ubagg_jetty->hnode); + (void)ubagg_bitmap_free_idx(ubagg_dev->jetty_bitmap, id); + kfree(ubagg_jetty); + ubagg_log_info("ubagg destroy jetty successfully, id: %u.\n", id); + return 0; +} + +int ubagg_query_device_status(struct ubcore_device *dev, + struct ubcore_device_status *status) +{ + int i; + + for (i = 0; i < UBCORE_MAX_PORT_CNT; ++i) { + status->port_status[i].state = UBCORE_PORT_ACTIVE; + status->port_status[i].active_mtu = UBCORE_MTU_4096; + status->port_status[i].active_speed = UBCORE_SP_400G; + status->port_status[i].active_width = UBCORE_LINK_X16; + } + return 0; +} + +static struct ubcore_ops g_ubagg_dev_ops = { + .owner = THIS_MODULE, + .driver_name = "ub_agg", + .abi_version = 0, + .user_ctl = ubagg_user_ctl, + .config_device = ubagg_config_device, + .alloc_ucontext = ubagg_alloc_ucontext, + .free_ucontext = ubagg_free_ucontext, + .query_device_attr = ubagg_query_device_attr, + .register_seg = ubagg_register_seg, + .unregister_seg = ubagg_unregister_seg, + .import_seg = ubagg_import_seg, + .unimport_seg = ubagg_unimport_seg, + .create_jfs = ubagg_create_jfs, + .destroy_jfs = ubagg_destroy_jfs, + .create_jfr = ubagg_create_jfr, + .destroy_jfr = ubagg_destroy_jfr, + .create_jetty = ubagg_create_jetty, + .destroy_jetty = ubagg_destroy_jetty, + .create_jfc = ubagg_create_jfc, + .destroy_jfc = ubagg_destroy_jfc, + .import_jfr = ubagg_import_jfr, + .unimport_jfr = ubagg_unimport_jfr, + .import_jetty = ubagg_import_jetty, + .unimport_jetty = 
ubagg_unimport_jetty, + .query_device_status = ubagg_query_device_status, +}; + +static void set_ubagg_device_attr(struct ubcore_device *dev, + struct ubagg_device_cap *dev_cap) +{ + dev->attr.dev_cap.feature = dev_cap->feature; + dev->attr.dev_cap.max_jfc = dev_cap->max_jfc; + dev->attr.dev_cap.max_jfs = dev_cap->max_jfs; + dev->attr.dev_cap.max_jfr = dev_cap->max_jfr; + dev->attr.dev_cap.max_jetty = dev_cap->max_jetty; + dev->attr.dev_cap.max_jetty_grp = dev_cap->max_jetty_grp; + dev->attr.dev_cap.max_jetty_in_jetty_grp = + dev_cap->max_jetty_in_jetty_grp; + dev->attr.dev_cap.max_jfc_depth = dev_cap->max_jfc_depth; + dev->attr.dev_cap.max_jfs_depth = dev_cap->max_jfs_depth; + dev->attr.dev_cap.max_jfr_depth = dev_cap->max_jfr_depth; + dev->attr.dev_cap.max_jfs_inline_size = dev_cap->max_jfs_inline_size; + dev->attr.dev_cap.max_jfs_sge = dev_cap->max_jfs_sge; + dev->attr.dev_cap.max_jfs_rsge = dev_cap->max_jfs_rsge; + dev->attr.dev_cap.max_jfr_sge = dev_cap->max_jfr_sge; + dev->attr.dev_cap.max_msg_size = dev_cap->max_msg_size; + dev->attr.dev_cap.max_read_size = dev_cap->max_read_size; + dev->attr.dev_cap.max_write_size = dev_cap->max_write_size; + dev->attr.dev_cap.max_cas_size = dev_cap->max_cas_size; + dev->attr.dev_cap.max_swap_size = dev_cap->max_swap_size; + dev->attr.dev_cap.max_fetch_and_add_size = + dev_cap->max_fetch_and_add_size; + dev->attr.dev_cap.max_fetch_and_sub_size = + dev_cap->max_fetch_and_sub_size; + dev->attr.dev_cap.max_fetch_and_and_size = + dev_cap->max_fetch_and_and_size; + dev->attr.dev_cap.max_fetch_and_or_size = + dev_cap->max_fetch_and_or_size; + dev->attr.dev_cap.max_fetch_and_xor_size = + dev_cap->max_fetch_and_xor_size; + dev->attr.dev_cap.atomic_feat = dev_cap->atomic_feat; + dev->attr.dev_cap.trans_mode = dev_cap->trans_mode; + dev->attr.dev_cap.sub_trans_mode_cap = dev_cap->sub_trans_mode_cap; + dev->attr.dev_cap.congestion_ctrl_alg = dev_cap->congestion_ctrl_alg; + dev->attr.dev_cap.ceq_cnt = dev_cap->congestion_ctrl_alg; + dev->attr.dev_cap.max_tp_in_tpg = dev_cap->max_tp_in_tpg; + dev->attr.dev_cap.max_eid_cnt = dev_cap->max_eid_cnt; + dev->attr.dev_cap.page_size_cap = dev_cap->page_size_cap; + dev->attr.dev_cap.max_oor_cnt = dev_cap->max_oor_cnt; + dev->attr.dev_cap.mn = dev_cap->mn; + dev->attr.dev_cap.max_netaddr_cnt = dev_cap->max_netaddr_cnt; +} + +static void ubagg_reserve_jetty_id(struct ubagg_device *dev) +{ + if (ubagg_bitmap_alloc_idx(dev->jfs_bitmap) != 0) + ubagg_log_err("Failed to reserve jfs id = 0.\n"); + + if (ubagg_bitmap_alloc_idx(dev->jfr_bitmap) != 0) + ubagg_log_err("Failed to reserve jfr id = 0.\n"); + + if (ubagg_bitmap_alloc_idx(dev->jetty_bitmap) != 0) + ubagg_log_err("Failed to reserve jetty id = 0.\n"); +} + +static int alloc_ubagg_dev_bitmap(struct ubagg_device *ubagg_dev) +{ + ubagg_dev->jfc_bitmap = ubagg_bitmap_alloc(UBAGG_BITMAP_MAX_SIZE); + if (ubagg_dev->jfc_bitmap == NULL) { + ubagg_log_err("failed alloc jfc bitmap.\n"); + return -1; + } + ubagg_dev->jfs_bitmap = ubagg_bitmap_alloc(UBAGG_BITMAP_MAX_SIZE); + if (ubagg_dev->jfs_bitmap == NULL) { + ubagg_log_err("failed alloc jfs bitmap.\n"); + goto free_jfc_bitmap; + } + ubagg_dev->jfr_bitmap = ubagg_bitmap_alloc(UBAGG_BITMAP_MAX_SIZE); + if (ubagg_dev->jfr_bitmap == NULL) { + ubagg_log_err("failed alloc jfr bitmap.\n"); + goto free_jfs_bitmap; + } + ubagg_dev->jfr_bitmap->alloc_idx = BITMAP_OFFSET; + ubagg_dev->jetty_bitmap = ubagg_bitmap_alloc(UBAGG_BITMAP_MAX_SIZE); + if (ubagg_dev->jetty_bitmap == NULL) { + ubagg_log_err("failed alloc jetty bitmap.\n"); + goto 
free_jfr_bitmap; + } + ubagg_dev->jetty_bitmap->alloc_idx = BITMAP_OFFSET; + ubagg_dev->segment_bitmap = ubagg_bitmap_alloc(UBAGG_BITMAP_MAX_SIZE); + if (ubagg_dev->segment_bitmap == NULL) { + ubagg_log_err("failed alloc seg bitmap.\n"); + goto free_jetty_bitmap; + } + ubagg_reserve_jetty_id(ubagg_dev); + + return 0; +free_jetty_bitmap: + if (ubagg_dev->jetty_bitmap != NULL) { + kfree(ubagg_dev->jetty_bitmap); + ubagg_dev->jetty_bitmap = NULL; + } +free_jfr_bitmap: + if (ubagg_dev->jfr_bitmap != NULL) { + kfree(ubagg_dev->jfr_bitmap); + ubagg_dev->jfr_bitmap = NULL; + } +free_jfs_bitmap: + if (ubagg_dev->jfs_bitmap != NULL) { + kfree(ubagg_dev->jfs_bitmap); + ubagg_dev->jfs_bitmap = NULL; + } +free_jfc_bitmap: + if (ubagg_dev->jfc_bitmap != NULL) { + kfree(ubagg_dev->jfc_bitmap); + ubagg_dev->jfc_bitmap = NULL; + } + return -1; +} + +static void free_ubagg_dev_bitmap(struct ubagg_device *ubagg_dev) +{ + ubagg_bitmap_free(ubagg_dev->segment_bitmap); + ubagg_bitmap_free(ubagg_dev->jetty_bitmap); + ubagg_bitmap_free(ubagg_dev->jfr_bitmap); + ubagg_bitmap_free(ubagg_dev->jfs_bitmap); + ubagg_bitmap_free(ubagg_dev->jfc_bitmap); +} + +static struct ubagg_device *ubagg_dev_create(struct ubagg_add_dev *arg) +{ + struct ubagg_device *cur, *ubagg_dev = NULL; + unsigned long flags; + int ret, i; + + if (arg->in.slave_dev_num <= 0 || + arg->in.slave_dev_num > UBAGG_MAX_DEV_NUM) { + ubagg_log_err("slave dev num is invalid, slave_dev_num:%d\n", + arg->in.slave_dev_num); + return NULL; + } + + ubagg_dev = kzalloc(sizeof(struct ubagg_device), GFP_KERNEL); + if (ubagg_dev == NULL) + return NULL; + kref_init(&ubagg_dev->ref); + + // init ubagg device + (void)memcpy(ubagg_dev->master_dev_name, arg->in.master_dev_name, + UBAGG_MAX_DEV_NAME_LEN); + ubagg_dev->slave_dev_num = arg->in.slave_dev_num; + for (i = 0; i < arg->in.slave_dev_num; i++) { + (void)memcpy(ubagg_dev->slave_dev_name[i], + arg->in.slave_dev_name[i], UBAGG_MAX_DEV_NAME_LEN); + } + + // init ubcore_device + (void)memcpy(ubagg_dev->ub_dev.dev_name, arg->in.master_dev_name, + UBAGG_MAX_DEV_NAME_LEN); + ubagg_dev->ub_dev.ops = &g_ubagg_dev_ops; + + ubagg_dev->ub_dev.attr.tp_maintainer = false; + ubagg_dev->ub_dev.attr.dev_cap.max_eid_cnt = UBAGG_DEVICE_MAX_EID_CNT; + set_ubagg_device_attr(&ubagg_dev->ub_dev, &arg->in.dev_attr.dev_cap); + + ret = alloc_ubagg_dev_bitmap(ubagg_dev); + if (ret != 0) { + ubagg_log_err("ubagg alloc bitmap fail\n"); + ubagg_dev_ref_put(ubagg_dev); + return NULL; + } + + ret = ubcore_register_device(&ubagg_dev->ub_dev); + if (ret != 0) { + ubagg_log_err("ubcore register device fail, name:%s\n", + arg->in.master_dev_name); + free_ubagg_dev_bitmap(ubagg_dev); + ubagg_dev_ref_put(ubagg_dev); + return NULL; + } + + ubagg_dev->ub_dev.eid_table.eid_entries[0].eid_index = 0; + ubagg_dev->ub_dev.eid_table.eid_entries[0].net = &init_net; + (void)memcpy(&ubagg_dev->ub_dev.eid_table.eid_entries[0].eid, + &arg->in.eid, UBAGG_EID_SIZE); + ubagg_dev->ub_dev.eid_table.eid_entries[0].valid = true; + + spin_lock_irqsave(&g_ubagg_dev_list_lock, flags); + list_for_each_entry(cur, &g_ubagg_dev_list, list_node) { + if (strncmp(cur->ub_dev.dev_name, arg->in.master_dev_name, + UBAGG_MAX_DEV_NAME_LEN) == 0) { + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + ubagg_log_err("ubagg dev: %s exists in list\n", + arg->in.master_dev_name); + ubcore_unregister_device(&ubagg_dev->ub_dev); + free_ubagg_dev_bitmap(ubagg_dev); + ubagg_dev_ref_put(ubagg_dev); + return NULL; + } + } + list_add_tail(&ubagg_dev->list_node, &g_ubagg_dev_list); + 
spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + ubagg_dev_ref_get(ubagg_dev); + ubagg_log_info("ubagg dev: %s adds to list success\n", + arg->in.master_dev_name); + return ubagg_dev; +} + +static void ubagg_dev_destroy(char *name) +{ + struct ubagg_device *dev = NULL; + unsigned long flags; + bool dev_exist = false; + + spin_lock_irqsave(&g_ubagg_dev_list_lock, flags); + list_for_each_entry(dev, &g_ubagg_dev_list, list_node) { + if (strncmp(dev->ub_dev.dev_name, name, + UBAGG_MAX_DEV_NAME_LEN) == 0) { + dev_exist = true; + list_del(&dev->list_node); + ubagg_dev_ref_put(dev); + break; + } + } + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + + if (!dev_exist) { + ubagg_log_err("ubagg device %s is not exist in list\n", name); + return; + } + + ubcore_unregister_device(&dev->ub_dev); + free_ubagg_dev_bitmap(dev); + ubagg_dev_ref_put(dev); +} + +static int add_dev(struct ubagg_cmd_hdr *hdr) +{ + struct ubagg_device *ubagg_dev; + struct ubagg_add_dev arg; + int ret; + + if (hdr->args_len != sizeof(struct ubagg_add_dev)) { + ubagg_log_err("add bond dev, hdr->args_len:%u is invalid\n", + hdr->args_len); + return -EINVAL; + } + + ret = copy_from_user(&arg, (void __user *)hdr->args_addr, + hdr->args_len); + if (ret != 0) { + ubagg_log_err("copy_from_user fail."); + return ret; + } + + if (ubagg_dev_exists(arg.in.master_dev_name)) { + ubagg_log_err("ubagg dev already exist, name:%s\n", + arg.in.master_dev_name); + return -EEXIST; + } + ubagg_dev = ubagg_dev_create(&arg); + if (ubagg_dev == NULL) { + ubagg_log_err("ubagg dev create fail, name:%s\n", + arg.in.master_dev_name); + return -1; + } + + if (!try_module_get(THIS_MODULE)) { + ubagg_log_err("try_module_get for ubagg fail.\n"); + goto module_get_fail; + } + return 0; + +module_get_fail: + ubagg_dev_destroy(ubagg_dev->master_dev_name); + return -ENODEV; +} + +static int rmv_dev(struct ubagg_cmd_hdr *hdr) +{ + struct ubagg_rmv_dev arg; + struct ubagg_device *ubagg_dev; + int ret; + + if (hdr->args_len != sizeof(struct ubagg_rmv_dev)) { + ubagg_log_err("rmv bond dev, hdr->args_len:%u is invalid\n", + hdr->args_len); + return -EINVAL; + } + + ret = copy_from_user(&arg, (void __user *)hdr->args_addr, + hdr->args_len); + if (ret != 0) { + ubagg_log_err("copy_from_user fail."); + return ret; + } + + ubagg_dev = ubagg_find_dev_by_name_and_rmv_from_list( + arg.in.master_dev_name); + if (ubagg_dev == NULL) { + ubagg_log_err("ubagg dev not exist, name:%s\n", + arg.in.master_dev_name); + return -ENODEV; + } + ubagg_log_info("rmv ubagg dev from list success\n"); + ubcore_unregister_device(&ubagg_dev->ub_dev); + free_ubagg_dev_bitmap(ubagg_dev); + ubagg_dev_ref_put(ubagg_dev); + module_put(THIS_MODULE); + return 0; +} + +static bool is_eid_valid(const char *eid) +{ + int i; + + for (i = 0; i < EID_LEN; i++) { + if (eid[i] != 0) + return true; + } + return false; +} + +static bool is_bonding_and_primary_eid_valid(struct ubagg_topo_map *topo_map) +{ + int i, j; + bool has_primary_eid = false; + + for (i = 0; i < topo_map->node_num; i++) { + if (!is_eid_valid(topo_map->topo_infos[i].bonding_eid)) + return false; + has_primary_eid = false; + for (j = 0; j < IODIE_NUM; j++) { + if (is_eid_valid(topo_map->topo_infos[i] + .io_die_info[j] + .primary_eid)) + has_primary_eid = true; + } + if (!has_primary_eid) + return false; + } + return true; +} + +static int find_cur_node_index(struct ubagg_topo_map *topo_map, + uint32_t *node_index) +{ + int i; + + for (i = 0; i < topo_map->node_num; i++) { + if (topo_map->topo_infos[i].is_cur_node) { + 
*node_index = i; + break; + } + } + if (i == topo_map->node_num) { + ubagg_log_err("can not find cur node index\n"); + return -1; + } + return 0; +} + +static bool compare_eids(const char *eid1, const char *eid2) +{ + return memcmp(eid1, eid2, EID_LEN) == 0; +} + +static int update_peer_port_eid(struct ubagg_topo_info *new_topo_info, + struct ubagg_topo_info *old_topo_info) +{ + int i, j; + char *new_peer_port_eid; + char *old_peer_port_eid; + + for (i = 0; i < IODIE_NUM; i++) { + for (j = 0; j < MAX_PORT_NUM; j++) { + if (!is_eid_valid( + new_topo_info->io_die_info[i].port_eid[j])) + continue; + + new_peer_port_eid = + new_topo_info->io_die_info[i].peer_port_eid[j]; + old_peer_port_eid = + old_topo_info->io_die_info[i].peer_port_eid[j]; + + if (!is_eid_valid(new_peer_port_eid)) + continue; + if (is_eid_valid(old_peer_port_eid) && + !compare_eids(new_peer_port_eid, + old_peer_port_eid)) { + ubagg_log_err( + "peer port eid is not same, new: " EID_FMT + ", old: " EID_FMT "\n", + EID_RAW_ARGS(new_peer_port_eid), + EID_RAW_ARGS(old_peer_port_eid)); + return -1; + } + (void)memcpy(old_peer_port_eid, new_peer_port_eid, + EID_LEN); + } + } + return 0; +} + +static int ubagg_update_topo_info(struct ubagg_topo_map *new_topo_map, + struct ubagg_topo_map *old_topo_map) +{ + struct ubagg_topo_info *new_cur_node_info; + struct ubagg_topo_info *old_cur_node_info; + uint32_t new_cur_node_index = 0; + uint32_t old_cur_node_index = 0; + + if (new_topo_map == NULL || old_topo_map == NULL) { + ubagg_log_err("Invalid topo map\n"); + return -EINVAL; + } + if (!is_bonding_and_primary_eid_valid(new_topo_map)) { + ubagg_log_err("Invalid primary eid\n"); + return -EINVAL; + } + if (find_cur_node_index(new_topo_map, &new_cur_node_index) != 0) { + ubagg_log_err("find cur node index failed in new topo map\n"); + return -1; + } + new_cur_node_info = &(new_topo_map->topo_infos[new_cur_node_index]); + if (find_cur_node_index(old_topo_map, &old_cur_node_index) != 0) { + ubagg_log_err("find cur node index failed in old topo map\n"); + return -1; + } + old_cur_node_info = &(old_topo_map->topo_infos[old_cur_node_index]); + + if (update_peer_port_eid(new_cur_node_info, old_cur_node_info) != 0) { + ubagg_log_err("update peer port eid failed\n"); + return -1; + } + return 0; +} + +static bool has_add_dev_by_bonding_eid(const char *bonding_eid) +{ + int i; + + if (bonding_eid == NULL) { + ubagg_log_err("bonding_eid is NULL"); + return false; + } + mutex_lock(&g_name_eid_arr_lock); + for (i = 0; i < UBAGG_MAX_BONDING_DEV_NUM; i++) { + if (compare_eids(bonding_eid, g_name_eid_arr[i].bonding_eid)) { + mutex_unlock(&g_name_eid_arr_lock); + return true; + } + } + mutex_unlock(&g_name_eid_arr_lock); + return false; +} + +static void fill_add_dev_cfg(struct ubagg_topo_info *topo_info, + struct ubagg_add_dev_by_uvs *arg) +{ + int i, j, k; + + (void)memcpy(&arg->bonding_eid, topo_info->bonding_eid, EID_LEN); + for (i = 0; i < IODIE_NUM; i++) + (void)memcpy(&arg->slave_eid[i].primary_eid, + topo_info->io_die_info[i].primary_eid, EID_LEN); + + for (j = 0; j < IODIE_NUM; j++) { + for (k = 0; k < MAX_PORT_NUM; k++) + (void)memcpy(&arg->slave_eid[j].port_eid[k], + topo_info->io_die_info[j].port_eid[k], + EID_LEN); + } +} + +static void +set_ubagg_device_attr_by_ubcore_cap(struct ubcore_device *dev, + struct ubcore_device_cap *dev_cap) +{ + dev->attr.dev_cap = *dev_cap; +} + +static int init_ubagg_dev(struct ubagg_device *ubagg_dev, + struct ubagg_add_dev_by_uvs *arg) +{ + struct ubcore_device *dev = NULL; + int slave_dev_idx = 0; + int i, j, 
k; + + // init ubagg device + (void)memcpy(ubagg_dev->master_dev_name, arg->master_dev_name, + UBAGG_MAX_DEV_NAME_LEN); + for (i = 0; i < IODIE_NUM; i++) { + if (!is_eid_valid((char *)&arg->slave_eid[i].primary_eid.raw)) + continue; + dev = ubcore_get_device_by_eid(&arg->slave_eid[i].primary_eid, + UBCORE_TRANSPORT_UB); + if (dev == NULL) { + ubagg_log_err( + "primary slave %d dev not exist, eid: " EID_FMT + "\n", + i, EID_ARGS(arg->slave_eid[i].primary_eid)); + return -1; + } + if (slave_dev_idx == 0) + set_ubagg_device_attr_by_ubcore_cap(&ubagg_dev->ub_dev, + &dev->attr.dev_cap); + + (void)memcpy(ubagg_dev->slave_dev_name[slave_dev_idx], + dev->dev_name, UBAGG_MAX_DEV_NAME_LEN); + slave_dev_idx++; + } + + for (j = 0; j < IODIE_NUM; j++) { + for (k = 0; k < MAX_PORT_NUM; k++) { + if (!is_eid_valid( + (char *)&arg->slave_eid[j].port_eid[k].raw)) + continue; + dev = ubcore_get_device_by_eid( + &arg->slave_eid[j].port_eid[k], + UBCORE_TRANSPORT_UB); + if (dev == NULL) { + ubagg_log_err( + "port slave %d_%d dev not exist, eid: " EID_FMT + "\n", + j, k, + EID_ARGS( + arg->slave_eid[j].port_eid[k])); + return -1; + } + if (slave_dev_idx == 0) + set_ubagg_device_attr_by_ubcore_cap( + &ubagg_dev->ub_dev, &dev->attr.dev_cap); + + (void)memcpy(ubagg_dev->slave_dev_name[slave_dev_idx], + dev->dev_name, UBAGG_MAX_DEV_NAME_LEN); + slave_dev_idx++; + } + } + + if (slave_dev_idx == 0) { + ubagg_log_err("slave devs is null\n"); + return -1; + } + + ubagg_dev->slave_dev_num = slave_dev_idx; + return 0; +} + +static int init_ubagg_res(struct ubagg_device *ubagg_dev) +{ + int ret = 0; + int i = 0; + int j = 0; + + ret = alloc_ubagg_dev_bitmap(ubagg_dev); + if (ret != 0) { + ubagg_log_err("ubagg alloc bitmap fail\n"); + return ret; + } + + for (i = 0; i < UBAGG_HT_MAX; i++) { + ret = ubagg_hash_table_alloc(&ubagg_dev->ubagg_ht[i], + &g_ubagg_ht_params[i]); + if (ret != 0) { + ubagg_log_err("Fail to init hash map:%d.\n", i); + goto FREE_HMAP; + } + } + + return 0; + +FREE_HMAP: + for (j = 0; j < i; j++) + ubagg_hash_table_free(&ubagg_dev->ubagg_ht[j]); + free_ubagg_dev_bitmap(ubagg_dev); + + return -ENOMEM; +} + +static void uninit_ubagg_res(struct ubagg_device *ubagg_dev) +{ + int i = 0; + + free_ubagg_dev_bitmap(ubagg_dev); + for (i = 0; i < UBAGG_HT_MAX; i++) + ubagg_hash_table_free(&ubagg_dev->ubagg_ht[i]); +} + +static int init_ubagg_ubcore_dev(struct ubagg_device *ubagg_dev, + struct ubagg_add_dev_by_uvs *arg) +{ + int ret = 0; + + (void)memcpy(ubagg_dev->ub_dev.dev_name, arg->master_dev_name, + UBAGG_MAX_DEV_NAME_LEN); + ubagg_dev->ub_dev.ops = &g_ubagg_dev_ops; + ubagg_dev->ub_dev.attr.tp_maintainer = false; + ubagg_dev->ub_dev.attr.dev_cap.max_eid_cnt = UBAGG_DEVICE_MAX_EID_CNT; + + ret = ubcore_register_device(&ubagg_dev->ub_dev); + if (ret != 0) { + ubagg_log_err("ubcore register device fail, name:%s\n", + arg->master_dev_name); + free_ubagg_dev_bitmap(ubagg_dev); + ubagg_dev_ref_put(ubagg_dev); + return ret; + } + + ubagg_dev->ub_dev.eid_table.eid_entries[0].eid_index = 0; + ubagg_dev->ub_dev.eid_table.eid_entries[0].net = &init_net; + (void)memcpy(&ubagg_dev->ub_dev.eid_table.eid_entries[0].eid, + &arg->bonding_eid, UBAGG_EID_SIZE); + ubagg_dev->ub_dev.eid_table.eid_entries[0].valid = true; + + return 0; +} + +static int add_dev_to_list(struct ubagg_device *ubagg_dev) +{ + struct ubagg_device *cur = NULL; + unsigned long flags; + + spin_lock_irqsave(&g_ubagg_dev_list_lock, flags); + list_for_each_entry(cur, &g_ubagg_dev_list, list_node) { + if (strncmp(cur->ub_dev.dev_name, 
ubagg_dev->ub_dev.dev_name, + UBAGG_MAX_DEV_NAME_LEN) == 0) { + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + ubcore_unregister_device(&ubagg_dev->ub_dev); + free_ubagg_dev_bitmap(ubagg_dev); + ubagg_dev_ref_put(ubagg_dev); + return -EEXIST; + } + } + list_add_tail(&ubagg_dev->list_node, &g_ubagg_dev_list); + ubagg_dev_ref_get(ubagg_dev); + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + return 0; +} + +static void rmv_dev_from_list(struct ubagg_device *ubagg_dev) +{ + struct ubagg_device *cur = NULL; + unsigned long flags; + + spin_lock_irqsave(&g_ubagg_dev_list_lock, flags); + list_for_each_entry(cur, &g_ubagg_dev_list, list_node) { + if (strncmp(cur->ub_dev.dev_name, ubagg_dev->ub_dev.dev_name, + UBAGG_MAX_DEV_NAME_LEN) == 0) { + list_del(&cur->list_node); + ubagg_dev_ref_put(ubagg_dev); + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + return; + } + } +} + +static int add_dev_by_uvs(struct ubagg_add_dev_by_uvs *arg) +{ + struct ubagg_device *ubagg_dev = NULL; + + if (ubagg_dev_exists(arg->master_dev_name)) { + ubagg_log_err("ubagg dev already exist, name:%s\n", + arg->master_dev_name); + return -EEXIST; + } + + ubagg_dev = kzalloc(sizeof(struct ubagg_device), GFP_KERNEL); + if (ubagg_dev == NULL) + return -ENOMEM; + kref_init(&ubagg_dev->ref); + + if (init_ubagg_dev(ubagg_dev, arg) != 0) { + ubagg_log_err("init ubagg dev fail, name:%s\n", + arg->master_dev_name); + goto PUT_DEV; + } + + if (init_ubagg_res(ubagg_dev) != 0) { + ubagg_log_err("init ubagg res fail, name:%s\n", + arg->master_dev_name); + goto PUT_DEV; + } + + if (init_ubagg_ubcore_dev(ubagg_dev, arg) != 0) { + ubagg_log_err("init ubagg ubcore fail, name:%s\n", + arg->master_dev_name); + goto UNINIT_UBAGG_RES; + } + + if (add_dev_to_list(ubagg_dev) != 0) { + ubagg_log_err("add dev to list fail, name:%s\n", + arg->master_dev_name); + goto UNINIT_UBCORE_DEV; + } + + if (!try_module_get(THIS_MODULE)) { + ubagg_log_err("try_module_get for ubagg fail.\n"); + goto REMOVE_DEV_LIST; + } + return 0; + +REMOVE_DEV_LIST: + rmv_dev_from_list(ubagg_dev); +UNINIT_UBCORE_DEV: + ubcore_unregister_device(&ubagg_dev->ub_dev); +UNINIT_UBAGG_RES: + uninit_ubagg_res(ubagg_dev); +PUT_DEV: + ubagg_dev_ref_put(ubagg_dev); + + return -ENODEV; +} + +static bool is_eid_empty(const char *eid) +{ + int i; + + for (i = 0; i < EID_LEN; i++) { + if (eid[i] != 0) + return false; + } + return true; +} + +static void find_add_master_dev(const char *bondingEid, const char *name) +{ + int i; + int empty_index = -1; + + mutex_lock(&g_name_eid_arr_lock); + for (i = 0; i < UBAGG_MAX_BONDING_DEV_NUM; i++) { + if (is_eid_empty(g_name_eid_arr[i].bonding_eid)) { + empty_index = i; + break; + } + } + if (empty_index == -1) { + mutex_unlock(&g_name_eid_arr_lock); + ubagg_log_err("g_name_eid_arr is full, max dev num is %d", + UBAGG_MAX_BONDING_DEV_NUM); + return; + } + (void)memcpy(g_name_eid_arr[empty_index].bonding_eid, bondingEid, + EID_LEN); + (void)snprintf(g_name_eid_arr[empty_index].master_dev_name, + UBAGG_MAX_DEV_NAME_LEN, "%s", name); + mutex_unlock(&g_name_eid_arr_lock); +} + +static int ubagg_add_dev_by_uvs(struct ubagg_topo_map *topo_map) +{ + struct ubagg_topo_info *cur_node_info; + struct ubagg_add_dev_by_uvs arg = { 0 }; + char *master_dev_name = NULL; + uint32_t cur_node_index = 0; + + if (find_cur_node_index(topo_map, &cur_node_index) != 0) { + ubagg_log_err("find cur node index failed\n"); + return -1; + } + cur_node_info = &(topo_map->topo_infos[cur_node_index]); + + if 
(has_add_dev_by_bonding_eid(cur_node_info->bonding_eid)) { + ubagg_log_info("has add dev by bonding eid: " EID_FMT "\n", + EID_RAW_ARGS(cur_node_info->bonding_eid)); + return 0; + } + + master_dev_name = generate_master_dev_name(); + if (master_dev_name == NULL) { + ubagg_log_err("generate master dev name failed\n"); + return -1; + } + + (void)snprintf(arg.master_dev_name, UBAGG_MAX_DEV_NAME_LEN, "%s", + master_dev_name); + fill_add_dev_cfg(cur_node_info, &arg); + + if (add_dev_by_uvs(&arg) != 0) { + release_bond_device_id_with_name(master_dev_name); + kfree(master_dev_name); + ubagg_log_err("add ubagg dev by uvs failed\n"); + return -1; + } + find_add_master_dev(cur_node_info->bonding_eid, master_dev_name); + kfree(master_dev_name); + return 0; +} + +static void print_topo_map(struct ubagg_topo_map *topo_map) +{ + int i, j, k; + struct ubagg_topo_info *cur_node_info; + + ubagg_log_info( + "========================== topo map start =============================\n"); + for (i = 0; i < topo_map->node_num; i++) { + cur_node_info = topo_map->topo_infos + i; + if (is_eid_empty(cur_node_info->bonding_eid)) + continue; + + ubagg_log_info( + "===================== node %d start =======================\n", + i); + ubagg_log_info("bonding eid: " EID_FMT "\n", + EID_RAW_ARGS(cur_node_info->bonding_eid)); + for (j = 0; j < IODIE_NUM; j++) { + ubagg_log_info( + "\tprimary eid %d: " EID_FMT "\n", j, + EID_RAW_ARGS(cur_node_info->io_die_info[j] + .primary_eid)); + for (k = 0; k < MAX_PORT_NUM; k++) { + ubagg_log_info( + "\t\tport eid %d: " EID_FMT "\n", k, + EID_RAW_ARGS( + cur_node_info->io_die_info[j] + .port_eid[k])); + ubagg_log_info( + "\t\tpeer_port eid %d: " EID_FMT "\n", + k, + EID_RAW_ARGS( + cur_node_info->io_die_info[j] + .peer_port_eid[k])); + } + } + ubagg_log_info( + "===================== node %d end =======================\n", + i); + } + ubagg_log_info( + "========================== topo map end =============================\n"); +} + +static int ubagg_set_topo_info(struct ubagg_cmd_hdr *hdr) +{ + struct ubagg_set_topo_info arg; + struct ubagg_topo_map *new_topo_map; + struct ubagg_topo_map *topo_map; + int ret; + + if (hdr->args_len != sizeof(struct ubagg_set_topo_info)) { + ubagg_log_err( + "set topo info, args_len is invalid, args_len:%u\n", + hdr->args_len); + return -EINVAL; + } + + ret = copy_from_user(&arg, (void __user *)hdr->args_addr, + hdr->args_len); + if (ret != 0) { + ubagg_log_err("copy_from_user fail."); + return ret; + } + if (arg.in.topo == NULL || arg.in.topo_num == 0 || + arg.in.topo_num > MAX_NODE_NUM) { + ubagg_log_err("Invalid set_topo_info param\n"); + return -EINVAL; + } + topo_map = get_global_ubagg_map(); + if (topo_map == NULL) { + topo_map = create_global_ubagg_topo_map(arg.in.topo, + arg.in.topo_num); + if (topo_map == NULL) { + ubagg_log_err("Failed to create topo map\n"); + return -ENOMEM; + } + if (!is_bonding_and_primary_eid_valid(topo_map)) { + delete_global_ubagg_topo_map(); + ubagg_log_err("Invalid primary eid\n"); + return -EINVAL; + } + } else { + // update topo_map + new_topo_map = create_ubagg_topo_map_from_user(arg.in.topo, + arg.in.topo_num); + if (ubagg_update_topo_info(new_topo_map, topo_map) != 0) { + delete_ubagg_topo_map(new_topo_map); + ubagg_log_err("Failed to update topo info\n"); + return -1; + } + delete_ubagg_topo_map(new_topo_map); + } + + print_topo_map(topo_map); + + if (ubagg_add_dev_by_uvs(topo_map) != 0) { + delete_global_ubagg_topo_map(); + ubagg_log_err("Failed to add dev by uvs\n"); + return -1; + } + return 0; +} + +int 
ubagg_delete_topo_map(void) +{ + delete_global_ubagg_topo_map(); + return 0; +} + +long ubagg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct ubagg_cmd_hdr hdr; + int ret = 0; + + if (cmd != UBAGG_CMD || !capable(CAP_NET_ADMIN)) { + ubagg_log_err("bad ubagg ioctl cmd!"); + return -ENOIOCTLCMD; + } + + ret = copy_from_user(&hdr, (void *)arg, sizeof(struct ubagg_cmd_hdr)); + if (ret != 0) { + ubagg_log_err("copy from user fail, ret:%d", ret); + return -EFAULT; + } + switch (hdr.command) { + case UBAGG_ADD_DEV: + return add_dev(&hdr); + case UBAGG_RMV_DEV: + return rmv_dev(&hdr); + case UBAGG_SET_TOPO_INFO: + return ubagg_set_topo_info(&hdr); + default: + ubagg_log_err("Wrong command type:%u", hdr.command); + return -EINVAL; + } +} diff --git a/drivers/ub/urma/ubagg/ubagg_ioctl.h b/drivers/ub/urma/ubagg/ubagg_ioctl.h new file mode 100644 index 000000000000..bc17f1875149 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_ioctl.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubagg ioctl head file + * Author: Dongxu Li + * Create: 2025-1-26 + * Note: + * History: 2025-1-26: Create file + */ +#ifndef UBAGG_IOCTL_H +#define UBAGG_IOCTL_H + +#include +#include "ubagg_topo_info.h" +#include "ubagg_types.h" + +enum ubagg_cmd { + UBAGG_ADD_DEV = 1, + UBAGG_RMV_DEV, + UBAGG_SET_TOPO_INFO, +}; + +struct ubagg_cmd_hdr { + uint32_t command; + uint32_t args_len; + uint64_t args_addr; +}; + +#define UBAGG_CMD_MAGIC 'B' +#define UBAGG_CMD _IOWR(UBAGG_CMD_MAGIC, 1, struct ubagg_cmd_hdr) + +#define UBAGG_EID_SIZE (16) + +/** A copy of urma_device_cap. + * This module needs user pass `urma_device_cap` in ioctl, + * but it can't include `urma_types.h` in kmod. + * So we copy this structure. 
+ */ +union ubagg_order_type_cap { + struct { + uint32_t ot : 1; + uint32_t oi : 1; + uint32_t ol : 1; + uint32_t no : 1; + uint32_t reserved : 28; + } bs; + uint32_t value; +}; +union ubagg_tp_type_cap { + struct { + uint32_t rtp : 1; + uint32_t ctp : 1; + uint32_t utp : 1; + uint32_t reserved : 29; + } bs; + uint32_t value; +}; + +union ubagg_tp_feature { + struct { + uint32_t rm_multi_path : 1; + uint32_t rc_multi_path : 1; + uint32_t reserved : 30; + } bs; + uint32_t value; +}; + +struct ubagg_device_cap { + union ubcore_device_feat feature; + uint32_t max_jfc; + uint32_t max_jfs; + uint32_t max_jfr; + uint32_t max_jetty; + uint32_t max_jetty_grp; + uint32_t max_jetty_in_jetty_grp; + uint32_t max_jfc_depth; + uint32_t max_jfs_depth; + uint32_t max_jfr_depth; + uint32_t max_jfs_inline_size; + uint32_t max_jfs_sge; + uint32_t max_jfs_rsge; + uint32_t max_jfr_sge; + uint64_t max_msg_size; + uint32_t max_read_size; + uint32_t max_write_size; + uint32_t max_cas_size; + uint32_t max_swap_size; + uint32_t max_fetch_and_add_size; + uint32_t max_fetch_and_sub_size; + uint32_t max_fetch_and_and_size; + uint32_t max_fetch_and_or_size; + uint32_t max_fetch_and_xor_size; + union ubcore_atomic_feat atomic_feat; + uint16_t trans_mode; /* one or more from ubcore_transport_mode_t */ + uint16_t sub_trans_mode_cap; /* one or more from ubcore_sub_trans_mode_cap */ + uint16_t congestion_ctrl_alg; /* one or more mode from ubcore_congestion_ctrl_alg_t */ + uint32_t ceq_cnt; /* completion vector count */ + uint32_t max_tp_in_tpg; + uint32_t max_eid_cnt; + uint64_t page_size_cap; + uint32_t max_oor_cnt; /* max OOR window size by packet */ + uint32_t mn; + uint32_t max_netaddr_cnt; + union ubagg_order_type_cap rm_order_cap; + union ubagg_order_type_cap rc_order_cap; + union ubagg_tp_type_cap rm_tp_cap; + union ubagg_tp_type_cap rc_tp_cap; + union ubagg_tp_type_cap um_tp_cap; + union ubagg_tp_feature tp_feature; +}; +/** A structure mimicking `urma_device_attr`. + * The field `dev_cap` is the same of that in `urma_device_attr`. 
+ */ +struct ubagg_config_dev_attr { + struct ubagg_device_cap dev_cap; +}; + +struct ubagg_add_dev { + struct { + int slave_dev_num; + char master_dev_name[UBAGG_MAX_DEV_NAME_LEN]; + char slave_dev_name[UBAGG_MAX_DEV_NUM][UBAGG_MAX_DEV_NAME_LEN]; + union ubcore_eid eid; + struct ubagg_config_dev_attr dev_attr; + } in; +}; + +struct ubagg_rmv_dev { + struct { + char master_dev_name[UBAGG_MAX_DEV_NAME_LEN]; + } in; +}; + +struct ubagg_set_topo_info { + struct { + void *topo; + uint32_t topo_num; + } in; +}; + +enum ubagg_userctl_opcode { + GET_SLAVE_DEVICE = 1, + GET_TOPO_INFO = 2, + GET_JFR_ID = 3, + GET_JETTY_ID = 4, + GET_SEG_INFO = 5, + GET_JETTY_INFO = 6, +}; + +struct ubagg_slave_device { + int slave_dev_num; + char slave_dev_name[UBAGG_MAX_DEV_NUM][UBAGG_MAX_DEV_NAME_LEN]; +}; + +struct ubagg_topo_info_out { + struct ubagg_topo_info topo_info[MAX_NODE_NUM]; + uint32_t node_num; +}; + +int ubagg_delete_topo_map(void); + +struct ubagg_primary_port_eid { + union ubcore_eid primary_eid; + union ubcore_eid port_eid[MAX_PORT_NUM]; +}; + +struct ubagg_add_dev_by_uvs { + char master_dev_name[UBAGG_MAX_DEV_NAME_LEN]; + union ubcore_eid bonding_eid; + struct ubagg_primary_port_eid slave_eid[IODIE_NUM]; +}; + +long ubagg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +#endif // UBAGG_IOCTL_H diff --git a/drivers/ub/urma/ubagg/ubagg_jetty.c b/drivers/ub/urma/ubagg/ubagg_jetty.c new file mode 100644 index 000000000000..f3fa47554474 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_jetty.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
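[Editor's note, not part of the patch] The ioctl interface above (UBAGG_CMD carrying a struct ubagg_cmd_hdr that points at a command-specific payload) is easiest to see from the caller's side. The sketch below is a hypothetical userspace helper: it assumes the definitions from ubagg_ioctl.h are mirrored in a userspace header, that the /dev/ubagg node created by ubagg_main.c is present, and that the caller holds CAP_NET_ADMIN (which ubagg_ioctl() checks). Names and error handling are illustrative only.

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	/* remove a previously added bonding device by its master name */
	int ubagg_remove_dev_example(const char *name)
	{
		struct ubagg_rmv_dev arg = { 0 };
		struct ubagg_cmd_hdr hdr = { 0 };
		int fd, ret;

		fd = open("/dev/ubagg", O_RDWR);
		if (fd < 0)
			return -1;

		strncpy(arg.in.master_dev_name, name, UBAGG_MAX_DEV_NAME_LEN - 1);
		hdr.command = UBAGG_RMV_DEV;
		hdr.args_len = (uint32_t)sizeof(arg);	/* must match exactly */
		hdr.args_addr = (uint64_t)(uintptr_t)&arg;

		ret = ioctl(fd, UBAGG_CMD, &hdr);
		close(fd);
		return ret;
	}

The kernel side rejects the request with -EINVAL if args_len differs from sizeof(struct ubagg_rmv_dev), so the payload size check above is not optional.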
+ * + * Description: ubagg jetty ops implementation + * Author: Wang Hang + * Create: 2025-08-13 + * Note: + * History: 2025-08-13: Create file + */ + +#include "ubagg_jetty.h" +#include "ubagg_log.h" + +struct ubagg_target_jetty { + struct ubcore_tjetty base; +}; + +struct ubcore_tjetty *ubagg_import_jfr(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_target_jetty *tjfr; + + if (dev == NULL || cfg == NULL || udata == NULL || udata->uctx == NULL) + return NULL; + + tjfr = kzalloc(sizeof(struct ubagg_target_jetty), GFP_KERNEL); + if (tjfr == NULL) + return NULL; + ubagg_log_info("Import jfr successfully, is:%u.\n", cfg->id.id); + return &tjfr->base; +} + +int ubagg_unimport_jfr(struct ubcore_tjetty *tjfr) +{ + struct ubagg_target_jetty *ubagg_tjfr; + + if (tjfr == NULL || tjfr->ub_dev == NULL || tjfr->uctx == NULL) { + ubagg_log_err("Invalid parameter.\n"); + return -EINVAL; + } + ubagg_tjfr = (struct ubagg_target_jetty *)tjfr; + ubagg_log_info("Unimport jfr successfully, id:%u.\n", + ubagg_tjfr->base.cfg.id.id); + kfree(ubagg_tjfr); + return 0; +} + +struct ubcore_tjetty *ubagg_import_jetty(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_target_jetty *tjetty; + + if (cfg == NULL || dev == NULL || udata == NULL) + return NULL; + + tjetty = kzalloc(sizeof(struct ubagg_target_jetty), GFP_KERNEL); + if (tjetty == NULL) + return NULL; + ubagg_log_info("Import jetty successfully, %u\n", cfg->id.id); + return &tjetty->base; +} + +int ubagg_unimport_jetty(struct ubcore_tjetty *tjetty) +{ + struct ubagg_target_jetty *ubagg_tjetty; + + if (tjetty == NULL || tjetty->ub_dev == NULL || tjetty->uctx == NULL) + return -EINVAL; + ubagg_tjetty = (struct ubagg_target_jetty *)tjetty; + ubagg_log_info("Unimport jetty successfully, id:%u.\n", + tjetty->cfg.id.id); + kfree(ubagg_tjetty); + return 0; +} diff --git a/drivers/ub/urma/ubagg/ubagg_jetty.h b/drivers/ub/urma/ubagg/ubagg_jetty.h new file mode 100644 index 000000000000..76fe7bbaa48a --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_jetty.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubagg jetty ops header + * Author: Weicheng Zhang + * Create: 2025-08-13 + * Note: + * History: 2025-08-13: Create file + */ + +#ifndef UBAGG_JETTY_H +#define UBAGG_JETTY_H + +#include "ubagg_types.h" + +struct ubcore_tjetty *ubagg_import_jfr(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); + +int ubagg_unimport_jfr(struct ubcore_tjetty *tjfr); + +struct ubcore_tjetty *ubagg_import_jetty(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); + +int ubagg_unimport_jetty(struct ubcore_tjetty *tjetty); + +#endif // UBAGG_SEG_H diff --git a/drivers/ub/urma/ubagg/ubagg_log.c b/drivers/ub/urma/ubagg/ubagg_log.c new file mode 100644 index 000000000000..99faac187eff --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_log.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubagg log file + * Author: Dongxu Li + * Create: 2025-1-14 + * Note: + * History: 2024-1-14: Create file + */ + +#include +#include "ubagg_log.h" + +uint32_t g_ubagg_log_level = UBAGG_LOG_LEVEL_WARNING; diff --git a/drivers/ub/urma/ubagg/ubagg_log.h b/drivers/ub/urma/ubagg/ubagg_log.h new file mode 100644 index 000000000000..81c9a15d5fc5 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_log.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubagg log head file + * Author: Dongxu Li + * Create: 2025-1-14 + * Note: + * History: 2025-1-14: Create file + */ + +#ifndef UBAGG_LOG_H +#define UBAGG_LOG_H + +#include +#include + +enum ubagg_log_level { + UBAGG_LOG_LEVEL_EMERG = 0, + UBAGG_LOG_LEVEL_ALERT = 1, + UBAGG_LOG_LEVEL_CRIT = 2, + UBAGG_LOG_LEVEL_ERR = 3, + UBAGG_LOG_LEVEL_WARNING = 4, + UBAGG_LOG_LEVEL_NOTICE = 5, + UBAGG_LOG_LEVEL_INFO = 6, + UBAGG_LOG_LEVEL_DEBUG = 7, + UBAGG_LOG_LEVEL_MAX = 8, +}; + +/* add log head info, "LogTag_UBAGG|function|[line]| */ +#define UBAGG_LOG_TAG "LogTag_UBAGG" +#define ubagg_log(l, format, args...) \ + pr_##l("%s|%s:[%d]|" format, UBAGG_LOG_TAG, __func__, __LINE__, ##args) + +#define UBAGG_RATELIMIT_INTERVAL (5 * HZ) +#define UBAGG_RATELIMIT_BURST 100 + +extern uint32_t g_ubagg_log_level; + +#define ubagg_log_info(...) \ + do { \ + if (g_ubagg_log_level >= UBAGG_LOG_LEVEL_INFO) \ + ubagg_log(info, __VA_ARGS__); \ + } while (0) + +#define ubagg_log_err(...) 
\ + do { \ + if (g_ubagg_log_level >= UBAGG_LOG_LEVEL_ERR) \ + ubagg_log(err, __VA_ARGS__); \ + } while (0) + +#define ubagg_log_warn(...) \ + do { \ + if (g_ubagg_log_level >= UBAGG_LOG_LEVEL_WARNING) \ + ubagg_log(warn, __VA_ARGS__); \ + } while (0) + +/* No need to record debug log by printk_ratelimited */ +#define ubagg_log_debug(...) \ + do { \ + if (g_ubagg_log_level >= UBAGG_LOG_LEVEL_DEBUG) \ + ubagg_log(debug, __VA_ARGS__); \ + } while (0) + +/* Rate Limited log to avoid soft lockup crash by quantities of printk */ +/* Current limit is 100 log every 5 seconds */ +#define ubagg_log_info_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBAGG_RATELIMIT_INTERVAL, \ + UBAGG_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubagg_log_level >= UBAGG_LOG_LEVEL_INFO)) \ + ubagg_log(info, __VA_ARGS__); \ + } while (0) + +#define ubagg_log_err_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBAGG_RATELIMIT_INTERVAL, \ + UBAGG_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubagg_log_level >= UBAGG_LOG_LEVEL_ERR)) \ + ubagg_log(err, __VA_ARGS__); \ + } while (0) + +#define ubagg_log_warn_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBAGG_RATELIMIT_INTERVAL, \ + UBAGG_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubagg_log_level >= UBAGG_LOG_LEVEL_WARNING)) \ + ubagg_log(warn, __VA_ARGS__); \ + } while (0) + +#endif /* UBAGG_LOG_H */ diff --git a/drivers/ub/urma/ubagg/ubagg_main.c b/drivers/ub/urma/ubagg/ubagg_main.c new file mode 100644 index 000000000000..39626c3bdf02 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_main.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubagg kernel module + * Author: Dongxu Li + * Create: 2025-1-14 + * Note: + * History: 2025-1-14: Create file + */ + +#include +#include +#include +#include + +#include "ubagg_log.h" +#include "ubagg_ioctl.h" +#include "ubagg_seg.h" +#include "ubagg_bitmap.h" +#include "ubagg_hash_table.h" + +#define UBAGG_MODULE_NAME "ubagg" +#define UBAGG_DEVNODE_MODE (0666) +#define UBAGG_DEVICE_NAME UBAGG_MODULE_NAME +#define UBAGG_LOG_FILE_PERMISSION (0644) + +module_param(g_ubagg_log_level, uint, UBAGG_LOG_FILE_PERMISSION); +MODULE_PARM_DESC(g_ubagg_log_level, + " 3: ERR, 4: WARNING, 5:NOTICE, 6: INFO, 7: DEBUG"); + +struct ubagg_ctx { + dev_t ubagg_devno; + struct cdev ubagg_cdev; + struct device *ubagg_dev; +}; + +static struct ubagg_ctx g_ubagg_ctx = { 0 }; + +static int ubagg_open(struct inode *i_node, struct file *filp) +{ + return 0; +} + +static int ubagg_close(struct inode *i_node, struct file *filp) +{ + return 0; +} + +static const struct file_operations g_ubagg_ops = { + .owner = THIS_MODULE, + .open = ubagg_open, + .release = ubagg_close, + .unlocked_ioctl = ubagg_ioctl, + .compat_ioctl = ubagg_ioctl, +}; + +static char *ubagg_devnode(const struct device *dev, umode_t *mode) + +{ + if (mode) + *mode = UBAGG_DEVNODE_MODE; + + return kasprintf(GFP_KERNEL, "%s", dev_name(dev)); +} + +static struct class g_ubagg_class = { + .name = UBAGG_MODULE_NAME, + .devnode = ubagg_devnode, +}; + +static int ubagg_cdev_create(void) +{ + int ret; + + ret = alloc_chrdev_region(&g_ubagg_ctx.ubagg_devno, 0, 1, + UBAGG_MODULE_NAME); + if (ret != 0) { + ubagg_log_err("alloc chrdev no for ubagg fail.\n"); + return ret; + } + + /* create /sys/class/ubagg */ + ret = class_register(&g_ubagg_class); + if (ret) { + ubagg_log_err("couldn't create class %s.\n", UBAGG_MODULE_NAME); + goto unregister_devno; + } + + cdev_init(&g_ubagg_ctx.ubagg_cdev, &g_ubagg_ops); + g_ubagg_ctx.ubagg_cdev.owner = THIS_MODULE; + + ret = cdev_add(&g_ubagg_ctx.ubagg_cdev, g_ubagg_ctx.ubagg_devno, 1); + if (ret != 0) { + ubagg_log_err("ubagg chrdev add failed, ret:%d.\n", ret); + goto class_unregister; + } + + /* /dev/ubagg */ + g_ubagg_ctx.ubagg_dev = device_create(&g_ubagg_class, NULL, + g_ubagg_ctx.ubagg_devno, NULL, + UBAGG_DEVICE_NAME); + if (IS_ERR(g_ubagg_ctx.ubagg_dev)) { + ret = (int)PTR_ERR(g_ubagg_ctx.ubagg_dev); + ubagg_log_err("couldn't create device %s, ret:%d.\n", + UBAGG_DEVICE_NAME, ret); + g_ubagg_ctx.ubagg_dev = NULL; + goto cdev_del; + } + ubagg_log_info("ubagg cdev,device and class created success.\n"); + + return 0; + +cdev_del: + cdev_del(&g_ubagg_ctx.ubagg_cdev); +class_unregister: + class_unregister(&g_ubagg_class); +unregister_devno: + unregister_chrdev_region(g_ubagg_ctx.ubagg_devno, 1); + + return ret; +} + +static void ubagg_cdev_destroy(void) +{ + device_destroy(&g_ubagg_class, g_ubagg_ctx.ubagg_cdev.dev); + g_ubagg_ctx.ubagg_dev = NULL; + cdev_del(&g_ubagg_ctx.ubagg_cdev); + class_unregister(&g_ubagg_class); + unregister_chrdev_region(g_ubagg_ctx.ubagg_devno, 1); +} + +static int __init ubagg_init(void) +{ + int ret = 0; + + ret = ubagg_cdev_create(); + if (ret != 0) { + ubagg_log_err("create cdev fail."); + return ret; + } + + return 0; +} + +static void __exit ubagg_exit(void) +{ + ubagg_delete_topo_map(); + ubagg_cdev_destroy(); +} + +module_init(ubagg_init); +module_exit(ubagg_exit); + +MODULE_DESCRIPTION("Kernel module for ubus"); +MODULE_AUTHOR("huawei"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ub/urma/ubagg/ubagg_seg.c b/drivers/ub/urma/ubagg/ubagg_seg.c new file mode 100644 
index 000000000000..9caa67a97d68 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_seg.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubagg kernel module + * Author: Weicheng Zhang + * Create: 2025-8-6 + * Note: + * History: 2025-8-6: Create file + */ + +#include "ubagg_seg.h" +#include "ubagg_bitmap.h" +#include "ubagg_log.h" + +int ubagg_unregister_seg(struct ubcore_target_seg *seg) +{ + struct ubagg_seg_hash_node *seg_node = NULL; + struct ubagg_device *ubagg_dev = NULL; + + if (!seg || !seg->ub_dev) { + ubagg_log_err("Invalid param.\n"); + return -EINVAL; + } + + ubagg_dev = to_ubagg_dev(seg->ub_dev); + seg_node = (struct ubagg_seg_hash_node *)seg; + + ubagg_hash_table_remove(&ubagg_dev->ubagg_ht[UBAGG_HT_SEGMENT_HT], + &seg_node->hnode); + ubagg_bitmap_free_idx(ubagg_dev->segment_bitmap, seg_node->token_id); + kfree(seg_node); + return 0; +} + +struct ubcore_target_seg *ubagg_register_seg(struct ubcore_device *dev, + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_seg_hash_node *seg_node = NULL; + struct ubagg_seg_hash_node *tmp_seg = NULL; + struct ubagg_hash_table *ubagg_seg_ht = NULL; + struct ubagg_device *ubagg_dev = NULL; + int token_id = -1; + int ret = 0; + + if (!dev || !cfg || !udata) { + ubagg_log_err("Invalid param.\n"); + return ERR_PTR(-EINVAL); + } + + ubagg_dev = to_ubagg_dev(dev); + seg_node = kzalloc(sizeof(struct ubagg_seg_hash_node), GFP_KERNEL); + if (!seg_node) + return ERR_PTR(-ENOMEM); + + token_id = ubagg_bitmap_alloc_idx(ubagg_dev->segment_bitmap); + if (token_id < 0) { + ubagg_log_err("Fail to alloc token id.\n"); + goto FREE_SEG_NODE; + } + + seg_node->ubagg_seg.seg.token_id = token_id; + seg_node->token_id = token_id; + seg_node->ubagg_seg.ub_dev = dev; + ret = copy_from_user(&seg_node->ex_info, + (void __user *)udata->udrv_data->in_addr, + udata->udrv_data->in_len); + if (ret != 0) { + ubagg_log_err("Fail to copy data from user space, ret:%d.\n", + ret); + goto FREE_TOKEN_ID; + } + + ubagg_seg_ht = &ubagg_dev->ubagg_ht[UBAGG_HT_SEGMENT_HT]; + spin_lock(&ubagg_seg_ht->lock); + tmp_seg = ubagg_hash_table_lookup_nolock(ubagg_seg_ht, token_id, + &token_id); + if (tmp_seg != NULL) { + // should remove it + ubagg_hash_table_remove_nolock(ubagg_seg_ht, &tmp_seg->hnode); + spin_unlock(&ubagg_seg_ht->lock); + ubagg_log_err("Token id already exists.\n"); + kfree(tmp_seg); + goto FREE_TOKEN_ID; + } + + ubagg_hash_table_add_nolock(ubagg_seg_ht, &seg_node->hnode, token_id); + spin_unlock(&ubagg_seg_ht->lock); + + return &seg_node->ubagg_seg; + +FREE_TOKEN_ID: + ubagg_bitmap_free_idx(ubagg_dev->segment_bitmap, token_id); +FREE_SEG_NODE: + kfree(seg_node); + return ERR_PTR(-EINVAL); +} + +struct ubcore_target_seg *ubagg_import_seg(struct ubcore_device *dev, + struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_device *ubagg_dev = to_ubagg_dev(dev); + struct ubcore_target_seg *tseg; + + if (ubagg_dev == NULL || cfg == NULL || udata == NULL || + 
udata->uctx == NULL) { + ubagg_log_err("Invalid param"); + return NULL; + } + + tseg = kzalloc(sizeof(struct ubcore_target_seg), GFP_KERNEL); + if (tseg == NULL) + return NULL; + + return tseg; +} + +int ubagg_unimport_seg(struct ubcore_target_seg *tseg) +{ + if (tseg == NULL || tseg->ub_dev == NULL || tseg->uctx == NULL) { + ubagg_log_err("Invalid param"); + return -EINVAL; + } + + kfree(tseg); + return 0; +} diff --git a/drivers/ub/urma/ubagg/ubagg_seg.h b/drivers/ub/urma/ubagg/ubagg_seg.h new file mode 100644 index 000000000000..f19044adcafa --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_seg.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubagg kernel module + * Author: Weicheng Zhang + * Create: 2025-8-6 + * Note: + * History: 2025-8-6: Create file + */ + +#ifndef UBAGG_SEG_H +#define UBAGG_SEG_H + +#include "ubagg_types.h" +#include "ubagg_hash_table.h" + +struct ubcore_target_seg *ubagg_register_seg(struct ubcore_device *dev, + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata); + +int ubagg_unregister_seg(struct ubcore_target_seg *seg); + +struct ubcore_target_seg *ubagg_import_seg(struct ubcore_device *dev, + struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata); + +int ubagg_unimport_seg(struct ubcore_target_seg *tseg); + +int ubagg_init_seg_bitmap(void); + +int ubagg_init_seg_ht(void); + +void ubagg_uninit_seg_bitmap(void); + +void ubagg_uninit_seg_ht(void); + +#endif // UBAGG_SEG_H diff --git a/drivers/ub/urma/ubagg/ubagg_topo_info.c b/drivers/ub/urma/ubagg/ubagg_topo_info.c new file mode 100644 index 000000000000..9001c5a44d2a --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_topo_info.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubagg topo info file + * Author: Ma Chuan + * Create: 2025-06-07 + * Note: + * History: 2025-06-07 Create file + */ +#include +#include +#include "ubagg_log.h" +#include "ubagg_topo_info.h" + +static struct ubagg_topo_map *g_topo_map; + +struct ubagg_topo_map * +create_global_ubagg_topo_map(struct ubagg_topo_info *topo_infos, + uint32_t node_num) +{ + g_topo_map = create_ubagg_topo_map_from_user(topo_infos, node_num); + return g_topo_map; +} + +void delete_global_ubagg_topo_map(void) +{ + if (g_topo_map == NULL) + return; + delete_ubagg_topo_map(g_topo_map); + g_topo_map = NULL; +} + +struct ubagg_topo_map *get_global_ubagg_map(void) +{ + return g_topo_map; +} + +struct ubagg_topo_map * +create_ubagg_topo_map_from_user(struct ubagg_topo_info *user_topo_infos, + uint32_t node_num) +{ + struct ubagg_topo_map *topo_map = NULL; + int ret = 0; + + if (user_topo_infos == NULL || node_num <= 0 || + node_num > MAX_NODE_NUM) { + ubagg_log_err("Invalid param\n"); + return NULL; + } + topo_map = kzalloc(sizeof(struct ubagg_topo_map), GFP_KERNEL); + if (topo_map == NULL) + return NULL; + ret = copy_from_user(topo_map->topo_infos, + (void __user *)user_topo_infos, + sizeof(struct ubagg_topo_info) * node_num); + if (ret != 0) { + ubagg_log_err("Failed to copy topo infos\n"); + kfree(topo_map); + return NULL; + } + topo_map->node_num = node_num; + return topo_map; +} + +void delete_ubagg_topo_map(struct ubagg_topo_map *topo_map) +{ + if (topo_map == NULL) + return; + kfree(topo_map); +} diff --git a/drivers/ub/urma/ubagg/ubagg_topo_info.h b/drivers/ub/urma/ubagg/ubagg_topo_info.h new file mode 100644 index 000000000000..d9dcaba0d99d --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_topo_info.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubagg topo info head file + * Author: Ma Chuan + * Create: 2025-06-07 + * Note: + * History: 2025-06-07 Create file + */ +#ifndef UBAGG_TOPO_INFO_H +#define UBAGG_TOPO_INFO_H + +#include + +#define EID_LEN (16) +#define MAX_PORT_NUM (9) +#define MAX_NODE_NUM (16) +#define IODIE_NUM (2) + +struct ubagg_iodie_info { + char primary_eid[EID_LEN]; + char port_eid[MAX_PORT_NUM][EID_LEN]; + char peer_port_eid[MAX_PORT_NUM][EID_LEN]; + int socket_id; +}; + +struct ubagg_topo_info { + char bonding_eid[EID_LEN]; + struct ubagg_iodie_info io_die_info[IODIE_NUM]; + bool is_cur_node; +}; + +struct ubagg_topo_map { + struct ubagg_topo_info topo_infos[MAX_NODE_NUM]; + uint32_t node_num; +}; + +struct ubagg_topo_map * +create_global_ubagg_topo_map(struct ubagg_topo_info *topo_infos, + uint32_t node_num); + +void delete_global_ubagg_topo_map(void); + +struct ubagg_topo_map *get_global_ubagg_map(void); + +struct ubagg_topo_map * +create_ubagg_topo_map_from_user(struct ubagg_topo_info *topo_infos, + uint32_t node_num); + +void delete_ubagg_topo_map(struct ubagg_topo_map *topo_map); +#endif // UBAGG_TOPO_INFO_H diff --git a/drivers/ub/urma/ubagg/ubagg_types.h b/drivers/ub/urma/ubagg/ubagg_types.h new file mode 100644 index 000000000000..e9365b934cb7 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_types.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubagg kernel module + * Author: Weicheng Zhang + * Create: 2025-8-6 + * Note: + * History: 2025-8-6: Create file + */ + +#ifndef UBAGG_TYPE_H +#define UBAGG_TYPE_H + +#include +#include + +#define UBAGG_DEV_MAX_NUM (20) +#define UBAGG_BITMAP_SIZE (10240) +#define UBAGG_MAX_DEV_NAME_LEN (64) +#define UBAGG_MAX_DEV_NUM (20) +#define ubagg_container_of(ptr, type, member) \ + (((ptr) == NULL) ? NULL : container_of(ptr, type, member)) + +enum ubagg_ht_param_num { + UBAGG_HT_SEGMENT_HT, + UBAGG_HT_JETTY_HT, + UBAGG_HT_JFR_HT, + UBAGG_HT_MAX, +}; + +struct ubagg_ht_param { + uint32_t size; + uint32_t node_offset; /* offset of hlist node in the hash table object */ + uint32_t key_offset; + uint32_t key_size; +}; + +struct ubagg_hash_table { + struct hlist_head *head; + struct ubagg_ht_param p; + spinlock_t lock; + struct kref kref; +}; + +struct ubagg_ubva { + union ubcore_eid eid; + uint32_t uasid; + uint64_t va; +} __packed; + +struct ubagg_seg_info { + struct ubagg_ubva ubva; + uint64_t len; + union ubcore_seg_attr attr; + uint32_t token_id; +}; + +// must be consistent with urma_bond_seg_info_out_t +struct ubagg_seg_exchange_info { + struct ubagg_seg_info base; + struct ubagg_seg_info slaves[UBAGG_DEV_MAX_NUM]; + int dev_num; +}; + +struct ubagg_seg_hash_node { + // ubagg_seg must be first! 
+ struct ubcore_target_seg ubagg_seg; + // unaccessable for ubcore + uint32_t token_id; // key + struct ubagg_seg_exchange_info ex_info; + struct hlist_node hnode; +}; + +struct ubagg_jetty_id { + union ubcore_eid eid; + uint32_t uasid; + uint32_t id; +}; + +struct ubagg_jetty_exchange_info { + struct ubagg_jetty_id base; + struct ubagg_jetty_id slaves[UBAGG_DEV_MAX_NUM]; + int dev_num; + bool is_in_matrix_server; + bool is_multipath; +}; + +struct ubagg_jetty_hash_node { + // base must be first! + struct ubcore_jetty base; + // unaccessable for ubcore + uint32_t token_id; // key + struct ubagg_jetty_exchange_info ex_info; + struct hlist_node hnode; +}; + +struct ubagg_jfr_hash_node { + // base must be first! + struct ubcore_jfr base; + // unaccessable for ubcore + uint32_t token_id; // key + struct ubagg_jetty_exchange_info ex_info; + struct hlist_node hnode; +}; + +struct ubagg_jfc { + struct ubcore_jfc base; +}; + +struct ubagg_jfs { + struct ubcore_jfs base; +}; + +struct ubagg_device { + struct ubcore_device ub_dev; + char master_dev_name[UBAGG_MAX_DEV_NAME_LEN]; + int slave_dev_num; + char slave_dev_name[UBAGG_MAX_DEV_NUM][UBAGG_MAX_DEV_NAME_LEN]; + struct ubagg_hash_table ubagg_ht[UBAGG_HT_MAX]; + struct ubagg_bitmap *segment_bitmap; + struct ubagg_bitmap *jfs_bitmap; + struct ubagg_bitmap *jfr_bitmap; + struct ubagg_bitmap *jfc_bitmap; + struct ubagg_bitmap *jetty_bitmap; + struct list_head list_node; + struct kref ref; +}; + +static inline struct ubagg_device * +to_ubagg_dev(const struct ubcore_device *ub_dev) +{ + return (struct ubagg_device *)ubagg_container_of( + ub_dev, struct ubagg_device, ub_dev); +} + +#endif // UBAGG_TYPE_H diff --git a/drivers/ub/urma/ubcore/Makefile b/drivers/ub/urma/ubcore/Makefile new file mode 100644 index 000000000000..a84edc0d0663 --- /dev/null +++ b/drivers/ub/urma/ubcore/Makefile @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux kernel UB device drivers. +# +ccflags-y += -I$(src) + +ubcore-objs := ubcore_log.o \ + ubcm/ubcm_log.o \ + ubcm/ub_mad.o \ + ubcm/ubcm_genl.o \ + ubcm/ub_cm.o \ + net/ubcore_session.o \ + net/ubcore_net.o \ + net/ubcore_sock.o \ + net/ubcore_cm.o \ + ubcm/ubmad_datapath.o \ + ubcore_topo_info.o \ + ubcore_cgroup.o \ + ubcore_dp.o \ + ubcore_hash_table.o \ + ubcore_netdev.o \ + ubcore_umem.o \ + ubcore_workqueue.o \ + ubcore_msg.o \ + ubcore_tp.o \ + ubcore_tp_table.o \ + ubcore_vtp.o \ + ubcore_cmd_tlv.o \ + ubcore_netlink.o \ + ubcore_segment.o \ + ubcore_uvs_cmd.o \ + ubcore_connect_adapter.o \ + ubcore_connect_bonding.o \ + ubcore_cdev_file.o \ + ubcore_device.o \ + ubcore_genl.o \ + ubcore_genl_admin.o \ + ubcore_jetty.o \ + ubcore_main.o \ + ubcore_ctp.o \ + ubcore_tpg.o \ + ubcore_utp.o \ + ubcore_uvs.o + +obj-$(CONFIG_UB_URMA) += ubcore.o diff --git a/drivers/ub/urma/ubcore/net/ubcore_cm.c b/drivers/ub/urma/ubcore/net/ubcore_cm.c new file mode 100644 index 000000000000..0aae3bab566b --- /dev/null +++ b/drivers/ub/urma/ubcore/net/ubcore_cm.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + * + * Description: ubcore connection manager implementation, interacts with ubcm + * Author: Wang Hang + * Create: 2025-02-18 + * Note: + * History: 2025-02-18: create file + */ + +#include "ubcore_log.h" +#include "ubcore_cm.h" + +static ubcore_cm_eid_ops g_eid_ops; +static ubcore_cm_send g_send; + +struct cm_entry { + union ubcore_eid addr; +}; + +static int ubcore_call_cm_send_ops(struct ubcore_device *dev, + struct ubcore_cm_send_buf *send_buf) +{ + if (g_send == NULL) { + ubcore_log_err("ubcore_cm_send_ops function not register yet!"); + return -1; + } + + return g_send(dev, send_buf); +} + +int ubcore_call_cm_eid_ops(struct ubcore_device *dev, + struct ubcore_eid_info *eid_info, + enum ubcore_mgmt_event_type event_type) +{ + if (g_eid_ops == NULL) { + ubcore_log_err("ubcore_cm_eid_ops function not register yet!"); + return -1; + } + + return g_eid_ops(dev, eid_info, event_type); +} + +void ubcore_register_cm_eid_ops(ubcore_cm_eid_ops eid_ops) +{ + g_eid_ops = eid_ops; + ubcore_log_info("ubcore_cm_eid_ops function registered!"); +} + +void ubcore_register_cm_send_ops(ubcore_cm_send cm_send) +{ + g_send = cm_send; + ubcore_log_info("ubcore_cm_send_ops function registered!"); +} + +int ubcore_cm_recv(struct ubcore_device *dev, struct ubcore_cm_recv_cr *recv_cr) +{ + union ubcore_eid addr = recv_cr->cr->remote_id.eid; + struct ubcore_net_msg msg = { 0 }; + + (void)memcpy(&msg, (void *)(recv_cr->payload), MSG_HDR_SIZE); + msg.data = (void *)(recv_cr->payload + MSG_HDR_SIZE); + + ubcore_log_info("Handle cm message, " MSG_FMT, MSG_ARG(&msg)); + ubcore_net_handle_msg(dev, &msg, &addr); + + return 0; +} + +int ubcore_ubcm_send_to(struct ubcore_device *dev, union ubcore_eid addr, + struct ubcore_net_msg *msg) +{ + uint16_t send_buf_len = + sizeof(struct ubcore_cm_send_buf) + MSG_HDR_SIZE + msg->len; + struct ubcore_cm_send_buf *send_buf = NULL; + int ret; + + ubcore_log_info("Send cm message, " MSG_FMT, MSG_ARG(msg)); + + send_buf = kcalloc(1, send_buf_len, GFP_KERNEL); + if (IS_ERR_OR_NULL(send_buf)) { + ubcore_log_err("Failed to alloc cm send buf memory.\n"); + return -ENOMEM; + } + + send_buf->dst_eid = addr; + send_buf->msg_type = UBCORE_CM_CONN_MSG; + send_buf->payload_len = send_buf_len; + (void)memcpy(send_buf->payload, msg, MSG_HDR_SIZE); + (void)memcpy(send_buf->payload + MSG_HDR_SIZE, msg->data, msg->len); + + ret = ubcore_call_cm_send_ops(dev, send_buf); + if (ret != 0) + ubcore_log_err("Failed to send cm message, ret:%d, " MSG_FMT, + ret, MSG_ARG(msg)); + + kfree(send_buf); + return ret; +} + +int ubcore_ubcm_send(struct ubcore_device *dev, void *conn, + struct ubcore_net_msg *msg) +{ + return ubcore_ubcm_send_to(dev, *(union ubcore_eid *)conn, msg); +} diff --git a/drivers/ub/urma/ubcore/net/ubcore_cm.h b/drivers/ub/urma/ubcore/net/ubcore_cm.h new file mode 100644 index 000000000000..ed93a829a8af --- /dev/null +++ b/drivers/ub/urma/ubcore/net/ubcore_cm.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + * + * Description: ubcore connection manager header + * Author: Wang Hang + * Create: 2025-02-18 + * Note: + * History: 2025-02-18: create file + */ + +#ifndef UBCORE_CM_H +#define UBCORE_CM_H + +#include "ubcore_net.h" +#include + +typedef int (*ubcore_cm_eid_ops)(struct ubcore_device *dev, + struct ubcore_eid_info *eid_info, + enum ubcore_mgmt_event_type event_type); +void ubcore_register_cm_eid_ops(ubcore_cm_eid_ops eid_ops); + +enum ubcore_cm_msg_type { + UBCORE_CM_CONN_MSG = 2, /* Consistent with UBMAD_UBC_CONN_DATA */ + UBCORE_CM_MSG_NUM +}; + +struct ubcore_cm_send_buf { + union ubcore_eid src_eid; /* [Optional] Initiator eid */ + union ubcore_eid dst_eid; /* [Mandatory] Target eid */ + uint32_t msg_type; /* [Mandatory] Refer to enum ubcore_cm_msg_type */ + uint32_t payload_len; /* [Mandatory] */ + uint8_t payload[0]; /* [Mandatory] */ +}; + +/** + * send by ubcm well-known jetty + * @param[in] dev: the ubcore_device handle; + * @param[in] send_buf: buffer to send, net message should be payload of send_buf; + * @return: 0 - Succeed to send; -EAGAIN - Try again later; Other value - Failed to send + */ +typedef int (*ubcore_cm_send)(struct ubcore_device *dev, + struct ubcore_cm_send_buf *send_buf); +void ubcore_register_cm_send_ops(ubcore_cm_send cm_send); + +struct ubcore_cm_recv_cr { + struct ubcore_cr *cr; + + /* remote eid see cr->remote_id.eid */ + union ubcore_eid local_eid; + + uint32_t msg_type; /* Refer to enum ubcore_cm_msg_type */ + uint64_t payload; + uint32_t payload_len; +}; +int ubcore_cm_recv(struct ubcore_device *dev, + struct ubcore_cm_recv_cr *recv_cr); + +int ubcore_call_cm_eid_ops(struct ubcore_device *dev, + struct ubcore_eid_info *eid_info, + enum ubcore_mgmt_event_type event_type); + +int ubcore_ubcm_send(struct ubcore_device *dev, void *conn, + struct ubcore_net_msg *msg); +int ubcore_ubcm_send_to(struct ubcore_device *dev, union ubcore_eid addr, + struct ubcore_net_msg *msg); + +#endif /* UBCORE_CM_H */ diff --git a/drivers/ub/urma/ubcore/net/ubcore_net.c b/drivers/ub/urma/ubcore/net/ubcore_net.c new file mode 100644 index 000000000000..f4c5cbf4e6bb --- /dev/null +++ b/drivers/ub/urma/ubcore/net/ubcore_net.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
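The CM header above is the contract between ubcore and its connection-manager backend: the backend hands ubcore a ubcore_cm_send callback for outgoing CM traffic and a ubcore_cm_eid_ops callback for management-plane eid events, and feeds received completions back through ubcore_cm_recv(). The snippet below is only an illustrative sketch of that wiring; my_cm_send() and my_cm_backend_init() are hypothetical names, not part of this patch (the real registration in this series is ubcore_register_cm_send_ops(ubmad_ubc_send) from ubcm_init()).

static int my_cm_send(struct ubcore_device *dev,
                      struct ubcore_cm_send_buf *send_buf)
{
        /* Push send_buf->payload (payload_len bytes) towards send_buf->dst_eid
         * over the well-known jetty; return 0, -EAGAIN to ask the caller to
         * retry later, or a negative error code.
         */
        return 0;
}

static int my_cm_backend_init(void)
{
        /* Once this is registered, ubcore_ubcm_send_to() can deliver messages. */
        ubcore_register_cm_send_ops(my_cm_send);
        return 0;
}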
+ * + * Description: ubcore net implementation + * Author: Wang Hang + * Create: 2025-06-09 + * Note: + * History: 2025-06-09: Create file + */ + +#include "ubcore_sock.h" +#include "ubcore_cm.h" + +#include "ubcore_log.h" +#include "ubcore_net.h" + +static const char *const msg_type_str_list[UBCORE_NET_MSG_MAX] = { + [UBCORE_NET_CREATE_REQ] = "create-req", + [UBCORE_NET_CREATE_RESP] = "create-resp", + [UBCORE_NET_CREATE_ACK] = "create-ack", + [UBCORE_NET_DESTROY_REQ] = "destroy-req", + [UBCORE_NET_DESTROY_RESP] = "destroy-resp", + [UBCORE_NET_BONDING_SEG_INFO_REQ] = "seg_req", + [UBCORE_NET_BONDING_SEG_INFO_RESP] = "seg_resp", + [UBCORE_NET_BONDING_JETTY_INFO_REQ] = "jetty_req", + [UBCORE_NET_BONDING_JETTY_INFO_RESP] = "jetty_resp", +}; + +enum ubcore_connect_type { + UBCORE_CONNECT_WK_JETTY = 0U, /* well-known jetty */ + UBCORE_CONNECT_SOCK, +}; + +uint32_t g_ubcore_connect_type = UBCORE_CONNECT_WK_JETTY; + +const char *msg_type_str(enum ubcore_net_msg_type type) +{ + size_t index = type; + + return (index < UBCORE_NET_MSG_MAX) ? msg_type_str_list[index] : + "unknown"; +} + +struct ubcore_net_msg_descriptor { + ubcore_net_msg_handler handler; + uint16_t msg_len; +}; + +static struct ubcore_net_msg_descriptor g_descriptors[UBCORE_NET_MSG_MAX] = { + 0 +}; + +int ubcore_net_register_msg_handler(enum ubcore_net_msg_type type, + ubcore_net_msg_handler handler, + uint16_t msg_len) +{ + size_t index = type; + + if (index >= UBCORE_NET_MSG_MAX || + g_descriptors[index].handler != NULL) { + ubcore_log_err("Failed to register net handler, type:%s", + msg_type_str(type)); + return -1; + } + g_descriptors[index].handler = handler; + g_descriptors[index].msg_len = msg_len; + return 0; +} + +void ubcore_net_handle_msg(struct ubcore_device *dev, + struct ubcore_net_msg *msg, void *conn) +{ + struct ubcore_net_msg_descriptor *descriptor = NULL; + + if (msg->type >= UBCORE_NET_MSG_MAX) { + ubcore_log_err("Invalid net msg type, " MSG_FMT, MSG_ARG(msg)); + return; + } + + descriptor = &g_descriptors[msg->type]; + if (msg->len != descriptor->msg_len) { + ubcore_log_err("Inalid net msg len, expected: %u, " MSG_FMT, + descriptor->msg_len, MSG_ARG(msg)); + return; + } + if (descriptor->handler == NULL) { + ubcore_log_err("No handler for net msg, " MSG_FMT, + MSG_ARG(msg)); + return; + } + + descriptor->handler(dev, msg, conn); +} + +static bool ubcore_is_loopback(struct ubcore_device *dev, + union ubcore_eid *addr) +{ + uint32_t eid_idx; + + spin_lock(&dev->eid_table.lock); + for (eid_idx = 0; eid_idx < dev->eid_table.eid_cnt; eid_idx++) { + if (dev->eid_table.eid_entries[eid_idx].valid && + memcmp(addr, &dev->eid_table.eid_entries[eid_idx].eid, + sizeof(union ubcore_eid)) == 0) { + spin_unlock(&dev->eid_table.lock); + return true; + } + } + spin_unlock(&dev->eid_table.lock); + return false; +} + +int ubcore_net_send(struct ubcore_device *dev, struct ubcore_net_msg *msg, + void *conn) +{ + if (conn == NULL) { + ubcore_log_info("Loopback detected, using shortcut."); + ubcore_net_handle_msg(dev, msg, NULL); + return 0; + } + + switch (g_ubcore_connect_type) { + case UBCORE_CONNECT_WK_JETTY: + return ubcore_ubcm_send(dev, conn, msg); + case UBCORE_CONNECT_SOCK: + return ubcore_sock_send(dev, msg, conn); + default: + ubcore_log_err("connect type unrecognized!"); + return -1; + } +} + +int ubcore_net_send_to(struct ubcore_device *dev, struct ubcore_net_msg *msg, + union ubcore_eid addr) +{ + if (ubcore_is_loopback(dev, &addr)) { + ubcore_log_info("Loopback detected, using shortcut."); + ubcore_net_handle_msg(dev, 
msg, NULL); + return 0; + } + + switch (g_ubcore_connect_type) { + case UBCORE_CONNECT_WK_JETTY: + return ubcore_ubcm_send_to(dev, addr, msg); + case UBCORE_CONNECT_SOCK: + return ubcore_sock_send_to(dev, msg, addr); + default: + ubcore_log_err("connect type unrecognized!"); + return -1; + } +} + +int ubcore_net_comm_init(void) +{ + if (ubcore_session_init() != 0) { + ubcore_log_err("Failed to init session service"); + return -1; + } + if (ubcore_sock_init() != 0) { + ubcore_log_err("connect type unrecognized!"); + goto uninit_session; + } + return 0; + +uninit_session: + ubcore_session_uninit(); + return -1; +} + +void ubcore_net_comm_uninit(void) +{ + ubcore_sock_uninit(); + ubcore_session_uninit(); +} diff --git a/drivers/ub/urma/ubcore/net/ubcore_net.h b/drivers/ub/urma/ubcore/net/ubcore_net.h new file mode 100644 index 000000000000..d5bd3e999f47 --- /dev/null +++ b/drivers/ub/urma/ubcore/net/ubcore_net.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore net header + * Author: Wang Hang + * Create: 2025-06-09 + * Note: + * History: 2025-06-09: create file + */ + +#ifndef NET_UBCORE_NET_H +#define NET_UBCORE_NET_H + +#include "ubcore_session.h" +#include + +/* Compatible with uvs_msg_type_t */ +enum ubcore_net_msg_type { + UBCORE_NET_CREATE_REQ, + UBCORE_NET_CREATE_RESP, + UBCORE_NET_CREATE_ACK, + UBCORE_NET_DESTROY_REQ, + UBCORE_NET_DESTROY_RESP, + UBCORE_NET_BONDING_SEG_INFO_REQ, + UBCORE_NET_BONDING_SEG_INFO_RESP, + UBCORE_NET_BONDING_JETTY_INFO_REQ, + UBCORE_NET_BONDING_JETTY_INFO_RESP, + UBCORE_NET_MSG_MAX, +}; + +/* Compatible with uvs_header_flag_t */ +union ubcore_net_msg_flag { + struct { + uint16_t live_migrate : 1; + uint16_t alpha : 1; + uint16_t reserved : 14; + } bs; + uint16_t value; +}; + +/* Compatible with uvs_base_header_t */ +struct ubcore_net_msg { + uint8_t version; + uint8_t type; // See enum ubcore_net_msg_type + uint16_t len; // Total length of payload + uint32_t session_id; // Message sequence number, to index session + uint16_t cap; // Capability, currently not used + union ubcore_net_msg_flag flag; // Flag, for msg extension + void *data; +}; + +#define MSG_HDR_SIZE offsetof(struct ubcore_net_msg, data) + +const char *__attribute_const__ msg_type_str(enum ubcore_net_msg_type type); + +#define UNPACK(...) __VA_ARGS__ +#define MSG_FMT "Msg[sid:%u %d.%s len:%u]" +#define MSG_ARG(m) \ + UNPACK(((m)->session_id), ((m)->type), (msg_type_str((m)->type)), \ + ((m)->len)) + +/** + * Callback function type for handling received network messages. + * @param[in] dev: Ubcore device. + * @param[in] msg: Received message (guaranteed valid length) + * @param[in] conn: Connector that received the message + */ +typedef void (*ubcore_net_msg_handler)(struct ubcore_device *dev, + struct ubcore_net_msg *msg, void *conn); + +extern uint32_t g_ubcore_connect_type; + +/** + * Register a callback to process received network messages of specified type. 
+ * @param[in] type: Message type to handle + * @param[in] handler: Callback function to process received messages + * @param[in] msg_len: Expected length of received messages (for validation) + * @return: 0 on success, other value on error + */ +int ubcore_net_register_msg_handler(enum ubcore_net_msg_type type, + ubcore_net_msg_handler handler, + uint16_t msg_len); + +void ubcore_net_handle_msg(struct ubcore_device *dev, + struct ubcore_net_msg *msg, void *conn); + +/** + * Send a network message through a connector, usually used in ubcore_net_msg_handler. + * @param[in] dev: Ubcore device. + * @param[in] msg: Message to be sent + * @param[in] conn: Connector to destination + * @return: 0 on success, other value on error + */ +int ubcore_net_send(struct ubcore_device *dev, struct ubcore_net_msg *msg, + void *conn); + +/** + * Send a network message to a specific destination address, usually used for the initial message. + * @param[in] dev: Ubcore device. + * @param[in] msg: Message to be sent + * @param[in] addr: Destination address + * @return: 0 on success, other value on error + */ +int ubcore_net_send_to(struct ubcore_device *dev, struct ubcore_net_msg *msg, + union ubcore_eid addr); + +int ubcore_net_comm_init(void); +void ubcore_net_comm_uninit(void); + +#endif diff --git a/drivers/ub/urma/ubcore/net/ubcore_session.c b/drivers/ub/urma/ubcore/net/ubcore_session.c new file mode 100644 index 000000000000..1ee0402d11b8 --- /dev/null +++ b/drivers/ub/urma/ubcore/net/ubcore_session.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
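Taken together, the handler registration above plus ubcore_net_send() and ubcore_net_send_to() form a small request/response framework. Below is a hedged sketch of how a user of this API might look; struct my_create_req, struct my_create_resp and the my_* functions are hypothetical stand-ins, not types defined by this patch:

struct my_create_req {
        uint32_t vtpn;
};

struct my_create_resp {
        uint32_t vtpn;
        int ret;
};

static void my_handle_create_req(struct ubcore_device *dev,
                                 struct ubcore_net_msg *msg, void *conn)
{
        /* ubcore_net_handle_msg() has already checked that msg->len matches
         * the length registered for this type, so msg->data is safe to use.
         */
        struct my_create_req *req = msg->data;
        struct my_create_resp resp = { .vtpn = req->vtpn, .ret = 0 };
        struct ubcore_net_msg reply = {
                .type = UBCORE_NET_CREATE_RESP,
                .len = sizeof(resp),
                .session_id = msg->session_id, /* echo the initiator's session */
                .data = &resp,
        };

        /* Reply on the connector the request arrived on. */
        (void)ubcore_net_send(dev, &reply, conn);
}

static int my_register_handler(void)
{
        return ubcore_net_register_msg_handler(UBCORE_NET_CREATE_REQ,
                                               my_handle_create_req,
                                               sizeof(struct my_create_req));
}

static int my_send_create_req(struct ubcore_device *dev, union ubcore_eid peer)
{
        struct my_create_req req = { .vtpn = 0 };
        struct ubcore_net_msg msg = {
                .type = UBCORE_NET_CREATE_REQ,
                .len = sizeof(req),
                .data = &req,
        };

        /* Routed over the well-known jetty or the TCP socket, depending on
         * g_ubcore_connect_type; loopback destinations are handled locally.
         */
        return ubcore_net_send_to(dev, &msg, peer);
}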
+ * + * Description: ubcore session implementation + * Author: Wang Hang + * Create: 2025-04-11 + * Note: + * History: 2025-04-11: Create file + */ + +#include +#include + +#include "ubcore_log.h" +#include "ubcore_session.h" + +struct ubcore_session { + struct ubcore_device *dev; + uint32_t session_id; + void *session_data; + struct kref ref; + struct list_head list_entry; + struct delayed_work delayed_work; + struct completion completion; + atomic_t cb_called; + ubcore_session_callback complete_cb; + ubcore_session_free_callback free_cb; +}; + +struct ubcore_session_context { + atomic_t next_id; + struct list_head list; + spinlock_t lock; + struct workqueue_struct *wq; +}; + +struct ubcore_session_context session_ctx = { 0 }; + +static inline void ubcore_session_add_to_list(struct ubcore_session *session) +{ + unsigned long flags; + + ubcore_session_ref_acquire(session); + spin_lock_irqsave(&session_ctx.lock, flags); + list_add_tail(&session->list_entry, &session_ctx.list); + spin_unlock_irqrestore(&session_ctx.lock, flags); + ubcore_log_info("Session %u add to list", session->session_id); +} + +static inline void +ubcore_session_remove_from_list(struct ubcore_session *session) +{ + unsigned long flags; + + spin_lock_irqsave(&session_ctx.lock, flags); + list_del(&session->list_entry); + spin_unlock_irqrestore(&session_ctx.lock, flags); + ubcore_session_ref_release(session); + ubcore_log_info("Session %u remove from list", session->session_id); +} + +static void ubcore_session_timeout(struct work_struct *work); + +struct ubcore_session * +ubcore_session_create(struct ubcore_device *dev, void *session_data, + uint32_t timeout, ubcore_session_callback complete_cb, + ubcore_session_free_callback free_cb) +{ + const uint32_t MAX_TIMEOUT = 30000; + struct ubcore_session *s; + uint32_t timeout_limited; + + if (timeout == 0 || timeout > MAX_TIMEOUT) + timeout_limited = MAX_TIMEOUT; + else + timeout_limited = timeout; + + s = kzalloc(sizeof(struct ubcore_session), GFP_KERNEL); + if (s == NULL) + return NULL; + + s->dev = dev; + s->session_id = (uint32_t)atomic_inc_return(&session_ctx.next_id); + s->session_data = session_data; + INIT_DELAYED_WORK(&s->delayed_work, ubcore_session_timeout); + kref_init(&s->ref); + init_completion(&s->completion); + atomic_set(&s->cb_called, 0); + s->complete_cb = complete_cb; + s->free_cb = free_cb; + ubcore_session_add_to_list(s); + + if (!queue_delayed_work(session_ctx.wq, &s->delayed_work, + msecs_to_jiffies(timeout_limited))) + goto delete_session; + + return s; + +delete_session: + ubcore_session_remove_from_list(s); + return NULL; +} + +struct ubcore_session *ubcore_session_find(uint32_t session_id) +{ + struct ubcore_session *cur, *target = NULL; + unsigned long flags; + + spin_lock_irqsave(&session_ctx.lock, flags); + list_for_each_entry(cur, &session_ctx.list, list_entry) { + if (cur->session_id == session_id) { + target = cur; + ubcore_session_ref_acquire(target); + break; + } + } + spin_unlock_irqrestore(&session_ctx.lock, flags); + return target; +} + +static void ubcore_session_timeout(struct work_struct *work) +{ + struct ubcore_session *session = + container_of(work, struct ubcore_session, delayed_work.work); + + if (atomic_cmpxchg(&session->cb_called, 0, 1) == 1) + return; + + ubcore_log_err("Session %u timeout\n", session->session_id); + + if (session->complete_cb != NULL) + session->complete_cb(session->dev, session->session_data); + complete(&session->completion); + ubcore_session_remove_from_list(session); +} + +void 
ubcore_session_complete(struct ubcore_session *session) +{ + if (atomic_cmpxchg(&session->cb_called, 0, 1) == 1) + return; + + ubcore_log_info("Session %u complete\n", session->session_id); + cancel_delayed_work_sync(&session->delayed_work); + + if (session->complete_cb != NULL) + session->complete_cb(session->dev, session->session_data); + complete(&session->completion); + ubcore_session_remove_from_list(session); +} + +void ubcore_session_wait(struct ubcore_session *session) +{ + wait_for_completion(&session->completion); +} + +static void ubcore_session_free(struct kref *kref) +{ + struct ubcore_session *session = + container_of(kref, struct ubcore_session, ref); + + if (session->session_data != NULL) { + if (session->free_cb == NULL) + session->free_cb = kfree; + (session->free_cb)(session->session_data); + } + kfree(session); +} + +void ubcore_session_ref_acquire(struct ubcore_session *session) +{ + kref_get(&session->ref); +} + +void ubcore_session_ref_release(struct ubcore_session *session) +{ + kref_put(&session->ref, ubcore_session_free); +} + +uint32_t ubcore_session_get_id(struct ubcore_session *session) +{ + return session->session_id; +} + +void *ubcore_session_get_data(struct ubcore_session *session) +{ + return session->session_data; +} + +void ubcore_session_flush(struct ubcore_device *dev) +{ + struct ubcore_session *session = NULL; + unsigned long flags; + + spin_lock_irqsave(&session_ctx.lock, flags); + list_for_each_entry(session, &session_ctx.list, list_entry) { + mod_delayed_work(session_ctx.wq, &session->delayed_work, 0); + } + spin_unlock_irqrestore(&session_ctx.lock, flags); + + flush_workqueue(session_ctx.wq); +} + +int ubcore_session_init(void) +{ + atomic_set(&session_ctx.next_id, 0); + INIT_LIST_HEAD(&session_ctx.list); + spin_lock_init(&session_ctx.lock); + + session_ctx.wq = alloc_workqueue("%s", 0, 1, "ubcore-session"); + if (session_ctx.wq == NULL) { + ubcore_log_err("Fail to alloc session workqueue."); + return -1; + } + return 0; +} + +void ubcore_session_uninit(void) +{ + struct ubcore_session *session = NULL; + unsigned long flags; + + spin_lock_irqsave(&session_ctx.lock, flags); + list_for_each_entry(session, &session_ctx.list, list_entry) { + mod_delayed_work(session_ctx.wq, &session->delayed_work, 0); + } + spin_unlock_irqrestore(&session_ctx.lock, flags); + + drain_workqueue(session_ctx.wq); + destroy_workqueue(session_ctx.wq); +} diff --git a/drivers/ub/urma/ubcore/net/ubcore_session.h b/drivers/ub/urma/ubcore/net/ubcore_session.h new file mode 100644 index 000000000000..eb0b986f37e6 --- /dev/null +++ b/drivers/ub/urma/ubcore/net/ubcore_session.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
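The session service above exists to pair an asynchronous request with its reply or with a timeout. Below is a minimal usage sketch against the API declared in the header that follows; struct my_ctx, my_on_complete() and my_rpc() are hypothetical. On the reply path, the peer's response handler would look the session up with ubcore_session_find(msg->session_id), call ubcore_session_complete(), and then ubcore_session_ref_release():

struct my_ctx {
        int status;
};

/* Runs exactly once, from ubcore_session_complete() or from the timeout. */
static void my_on_complete(struct ubcore_device *dev, const void *session_data)
{
        (void)dev;
        (void)session_data;
}

static int my_rpc(struct ubcore_device *dev, union ubcore_eid peer,
                  struct ubcore_net_msg *msg)
{
        struct ubcore_session *s;
        struct my_ctx *ctx;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx == NULL)
                return -ENOMEM;

        /* 5 s timeout; with free_cb == NULL, ctx is kfree()d when the last
         * session reference is dropped.
         */
        s = ubcore_session_create(dev, ctx, 5000, my_on_complete, NULL);
        if (s == NULL) {
                kfree(ctx); /* sketch assumes ctx ownership stays with the caller on failure */
                return -ENOMEM;
        }

        msg->session_id = ubcore_session_get_id(s);
        ret = ubcore_net_send_to(dev, msg, peer);
        if (ret == 0)
                ubcore_session_wait(s); /* woken by complete() or by the timer */

        ubcore_session_ref_release(s);
        return ret;
}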
+ * + * Description: ubcore session header + * Author: Wang Hang + * Create: 2025-04-11 + * Note: + * History: 2025-04-11: create file + */ + +#ifndef NET_UBCORE_SESSION_H +#define NET_UBCORE_SESSION_H + +#include + +struct ubcore_session; + +typedef void (*ubcore_session_callback)(struct ubcore_device *dev, + const void *session_data); +typedef void (*ubcore_session_free_callback)(const void *session_data); + +/** + * Creates a new session, caller must release the reference using session_ref_release. This callback + * guarantees that it will be called exactly once. If the session is not explicitly completed + * by calling ubcore_session_complete, it will be automatically called when timeout occurs. + * @param[in] session_data: User data associated with the session + * @param[in] timeout: Session timeout in milliseconds, timer starts upon creation + * @param[in] complete_cb: Callback for session completion + * @param[in] free_cb: Callback for session_data cleanup, (if NULL, uses kfree) + * @return: Pointer to new session with acquired reference + */ +struct ubcore_session * +ubcore_session_create(struct ubcore_device *dev, void *session_data, + uint32_t timeout, ubcore_session_callback complete_cb, + ubcore_session_free_callback free_cb); + +/** + * Finds a session by its ID, caller must release the reference using session_ref_release. + * @param[in] session_id: Session ID to search for + * @return: Found session pointer with acquired reference, NULL if not found + */ +struct ubcore_session *ubcore_session_find(uint32_t session_id); + +/** + * Marks a session as completed. Invokes the completion callback if the session hasn't timed out. + * @param[in] session: Target session + */ +void ubcore_session_complete(struct ubcore_session *session); + +/** + * Blocks caller until the session completes or times out. + * @param[in] session: Target session + */ +void ubcore_session_wait(struct ubcore_session *session); + +/** + * Acquire a reference to the session. + * @param[in] session: Target session + */ +void ubcore_session_ref_acquire(struct ubcore_session *session); + +/** + * Releases a reference to the session. + * @param[in] session: Target session + */ +void ubcore_session_ref_release(struct ubcore_session *session); + +/** + * Get session ID. + * @param[in] session: Target session + * @return" Unique session ID + */ +uint32_t ubcore_session_get_id(struct ubcore_session *session); + +/** + * Get session user data. + * @param[in] session: Target session + * @return: User data provided during session creation + */ +void *ubcore_session_get_data(struct ubcore_session *session); + +void ubcore_session_flush(struct ubcore_device *dev); +int ubcore_session_init(void); +void ubcore_session_uninit(void); + +#endif diff --git a/drivers/ub/urma/ubcore/net/ubcore_sock.c b/drivers/ub/urma/ubcore/net/ubcore_sock.c new file mode 100644 index 000000000000..8d86e490b11b --- /dev/null +++ b/drivers/ub/urma/ubcore/net/ubcore_sock.c @@ -0,0 +1,676 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + * + * Description: ubcore sock implementation + * Author: Wang Hang + * Create: 2025-02-18 + * Note: + * History: 2025-02-18: create file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ubcore_log.h" +#include "ubcore_priv.h" +#include "ubcore_sock.h" + +#define IPV4_MAP_IPV6_PREFIX 0x0000ffff +#define SOCK_PORT 1226 +#define SOCK_MSG_MAX_LEN 4096 + +enum sk_type { + SOCK_LISTEN, + SOCK_CONNECT, + SOCK_ACCEPT, +}; + +struct sk_entry { + struct list_head all_list_entry; + struct list_head ready_list_entry; + struct kref ref; + spinlock_t sk_lock; + struct socket *sock; + enum sk_type sk_type; + union ubcore_net_addr_union ub_addr; + atomic_t inactive; + // for epoll + wait_queue_head_t *whead; + wait_queue_entry_t wait; + poll_table pt; +}; + +struct sk_msg_descriptor { + ubcore_net_msg_handler handler; + uint16_t msg_len; +}; + +struct sk_service { + struct task_struct *daemon; + struct list_head all_list; + struct list_head ready_list; + uint16_t port; + struct sk_msg_descriptor descriptor[UBCORE_NET_MSG_MAX]; + spinlock_t lock; + wait_queue_head_t wq; +}; + +static void k_parse_addr(union ubcore_net_addr_union *ub_addr, uint16_t port, + struct sockaddr *sk_addr, int *sk_addr_size) +{ + if (ub_addr->in4.reserved1 == 0 && + ub_addr->in4.reserved2 == htonl(IPV4_MAP_IPV6_PREFIX)) { + struct sockaddr_in *sk_addr_in = (struct sockaddr_in *)sk_addr; + + sk_addr_in->sin_family = AF_INET; + sk_addr_in->sin_port = htons(port); + sk_addr_in->sin_addr.s_addr = ub_addr->in4.addr; + + *sk_addr_size = (int)sizeof(*sk_addr_in); + } else { + struct sockaddr_in6 *sk_addr_in6 = + (struct sockaddr_in6 *)sk_addr; + + sk_addr_in6->sin6_family = AF_INET6; + sk_addr_in6->sin6_port = htons(port); + memcpy(&sk_addr_in6->sin6_addr, &ub_addr->in6, + sizeof(ub_addr->in6)); + + *sk_addr_size = (int)sizeof(*sk_addr_in6); + } +} + +static int k_setsockopt_keepalive(struct socket *sock) +{ + int keepidle = 5; + int keepcnt = 3; + + sock_set_keepalive(sock->sk); + tcp_sock_set_keepcnt(sock->sk, keepcnt); + tcp_sock_set_keepidle(sock->sk, keepidle); + tcp_sock_set_keepintvl(sock->sk, keepidle); + + return 0; +} + +static inline void k_setsockopt_reuse(struct socket *sock) +{ + sock->sk->sk_reuse = true; +} + +static inline void k_close(struct socket *sock) +{ + sock_release(sock); +} + +static struct socket *k_listen(union ubcore_net_addr_union *ub_addr, + uint16_t port) +{ + struct socket *sock = NULL; + struct sockaddr_in6 sk_addr_inner = { 0 }; + struct sockaddr *sk_addr = (struct sockaddr *)&sk_addr_inner; + int sk_addr_size, backlog = 128; + int ret; + + k_parse_addr(ub_addr, port, sk_addr, &sk_addr_size); + + ret = sock_create(sk_addr->sa_family, SOCK_STREAM, 0, &sock); + if (ret < 0) { + ubcore_log_err("Failed to create socket"); + return NULL; + } + + k_setsockopt_reuse(sock); + + ret = kernel_bind(sock, sk_addr, sk_addr_size); + if (ret < 0) { + ubcore_log_err("Failed to call kernel_bind, ret: %d.\n", ret); + goto destroy_sock; + } + + ret = kernel_listen(sock, backlog); + if (ret < 0) { + ubcore_log_err("Failed to call kernel_listen"); + goto destroy_sock; + } + + return sock; + +destroy_sock: + k_close(sock); + return NULL; +} + +static struct socket *k_accept(struct socket *sock, + union ubcore_net_addr_union *ub_addr) +{ + struct socket *newsock = NULL; + struct sockaddr_in6 sk_addr_inner = { 0 }; + struct sockaddr *sk_addr = (struct sockaddr *)&sk_addr_inner; 
+ int ret; + + ret = kernel_accept(sock, &newsock, 0); + if (ret < 0) { + ubcore_log_err("Failed to call kernel_accept"); + return NULL; + } + + ret = kernel_getpeername(newsock, sk_addr); + if (ret == sizeof(struct sockaddr_in6) && + sk_addr->sa_family == AF_INET6) { + struct sockaddr_in6 *sk_addr_in6 = + (struct sockaddr_in6 *)sk_addr; + + memcpy(&ub_addr->in6, &sk_addr_in6->sin6_addr, + sizeof(sk_addr_in6->sin6_addr)); + } else if (ret == sizeof(struct sockaddr_in) && + sk_addr->sa_family == AF_INET) { + struct sockaddr_in *sk_addr_in = (struct sockaddr_in *)sk_addr; + + ub_addr->in4.addr = sk_addr_in->sin_addr.s_addr; + ub_addr->in4.reserved2 = IPV4_MAP_IPV6_PREFIX; + } else { + ubcore_log_err("Failed to call kernel_getpeername"); + goto destroy_sock; + } + + ret = k_setsockopt_keepalive(newsock); + if (ret < 0) { + ubcore_log_err("Failed to set socket keepalive"); + goto destroy_sock; + } + + return newsock; + +destroy_sock: + k_close(newsock); + return NULL; +} + +static struct socket *k_connect(union ubcore_net_addr_union *ub_addr, + uint16_t port) +{ + struct socket *sock = NULL; + struct sockaddr_in6 sk_addr_inner = { 0 }; + struct sockaddr *sk_addr = (struct sockaddr *)&sk_addr_inner; + int sk_addr_size; + int ret; + + k_parse_addr(ub_addr, port, sk_addr, &sk_addr_size); + + ret = sock_create(sk_addr->sa_family, SOCK_STREAM, 0, &sock); + if (ret < 0) { + ubcore_log_err("Failed to create socket"); + return NULL; + } + + ret = kernel_connect(sock, sk_addr, sk_addr_size, 0); + if (ret < 0) { + ubcore_log_err("Failed to call kernel_connect"); + goto destroy_sock; + } + + ret = k_setsockopt_keepalive(sock); + if (ret != 0) { + ubcore_log_err("Failed to set socket keepalive"); + goto destroy_sock; + } + + return sock; + +destroy_sock: + k_close(sock); + return NULL; +} + +static int k_recvmsg(struct socket *sock, void *buf, size_t buf_size, int flags) +{ + struct msghdr msg = { 0 }; + struct kvec vec; + + vec.iov_base = buf; + vec.iov_len = buf_size; + + return kernel_recvmsg(sock, &msg, &vec, 1, buf_size, flags); +} + +static struct sk_service ss = { 0 }; + +static void sk_entry_release(struct kref *ref) +{ + struct sk_entry *entry = container_of(ref, struct sk_entry, ref); + + k_close(entry->sock); + kfree(entry); +} + +static inline void sk_entry_ref_acquire(struct sk_entry *entry) +{ + kref_get(&entry->ref); +} + +static inline void sk_entry_ref_release(struct sk_entry *entry) +{ + kref_put(&entry->ref, sk_entry_release); +} + +static inline void sk_entry_add_to_all_list(struct sk_entry *entry) +{ + list_add(&entry->all_list_entry, &ss.all_list); +} + +static inline void sk_entry_remove_from_all_list(struct sk_entry *entry) +{ + list_del(&entry->all_list_entry); +} + +static inline void sk_entry_add_to_ready_list(struct sk_entry *entry) +{ + // Check if the current entry is in the list + if (list_empty(&entry->ready_list_entry)) + list_add(&entry->ready_list_entry, &ss.ready_list); +} + +static inline void sk_entry_remove_from_ready_list(struct sk_entry *entry) +{ + // Check if the current entry is in the list + // The entry is Initialized after removing from the ready list, + // prepared for future insertion. 
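        // The INIT_LIST_HEAD() done in sk_entry_create() together with the
        // list_del_init() below keeps ready_list_entry self-linked whenever the
        // entry is off the ready list, so the list_empty() checks in both helpers
        // remain a reliable "already queued?" test; a plain list_del() would
        // poison the pointers and break that test.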
+ if (!list_empty(&entry->ready_list_entry)) + list_del_init(&entry->ready_list_entry); +} + +static int sk_wait_callback(wait_queue_entry_t *wait, unsigned int mode, + int sync, void *key) +{ + struct sk_entry *entry = container_of(wait, struct sk_entry, wait); + unsigned long flags; + + if (atomic_read(&entry->inactive)) + return 0; + + spin_lock_irqsave(&ss.lock, flags); + sk_entry_add_to_ready_list(entry); + smp_mb(); + // memory barrier required + if (waitqueue_active(&ss.wq)) + wake_up_locked(&ss.wq); + spin_unlock_irqrestore(&ss.lock, flags); + return 0; +} + +static void sk_add_wait_queue(struct file *file, wait_queue_head_t *whead, + poll_table *pt) +{ + struct sk_entry *entry = container_of(pt, struct sk_entry, pt); + + init_waitqueue_func_entry(&entry->wait, sk_wait_callback); + add_wait_queue(whead, &entry->wait); + entry->whead = whead; +} + +static struct sk_entry *sk_entry_create(struct socket *sock, + enum sk_type sk_type, + union ubcore_net_addr_union ub_addr) +{ + struct sk_entry *entry; + __poll_t events; + unsigned long flags; + + entry = kmalloc(sizeof(struct sk_entry), GFP_KERNEL); + if (IS_ERR_OR_NULL(entry)) { + ubcore_log_err("Failed to alloc sock entry"); + return NULL; + } + kref_init(&entry->ref); + spin_lock_init(&entry->sk_lock); + entry->sock = sock; + entry->sk_type = sk_type; + entry->ub_addr = ub_addr; + atomic_set(&entry->inactive, 0); + entry->whead = NULL; + INIT_LIST_HEAD(&entry->ready_list_entry); + + spin_lock_irqsave(&ss.lock, flags); + sk_entry_add_to_all_list(entry); + entry->pt._qproc = sk_add_wait_queue; + entry->pt._key = POLLIN | POLLERR | POLLHUP | POLLRDHUP; + events = (*entry->sock->ops->poll)(NULL, entry->sock, &entry->pt); + entry->pt._qproc = NULL; + + if (events & EPOLLIN) { + sk_entry_add_to_ready_list(entry); + smp_mb(); + // memory barrier required + if (waitqueue_active(&ss.wq)) + wake_up_locked(&ss.wq); + } else if (unlikely(events & EPOLLRDHUP)) { + ubcore_log_err( + "EPOLLRDHUP event detected: peer socket closed or suspended"); + } + spin_unlock_irqrestore(&ss.lock, flags); + + ubcore_log_info("Create sock entry, type:%d, addr:" EID_FMT, sk_type, + EID_ARGS(ub_addr)); + return entry; +} + +static struct sk_entry *sk_entry_create_listen(union ubcore_net_addr_union addr) +{ + struct sk_entry *entry; + struct socket *sock; + + sock = k_listen(&addr, ss.port); + if (sock == NULL) { + ubcore_log_err("Failed to create listen socket"); + return NULL; + } + + entry = sk_entry_create(sock, SOCK_LISTEN, addr); + if (entry == NULL) { + ubcore_log_err("Failed to create sock entry"); + k_close(sock); + } + return entry; +} + +static struct sk_entry *sk_entry_create_accept(struct socket *sock) +{ + struct sk_entry *entry; + struct socket *newsock; + union ubcore_net_addr_union addr = { 0 }; + + newsock = k_accept(sock, &addr); + if (newsock == NULL) { + ubcore_log_err("Failed to create accept socket"); + return NULL; + } + + entry = sk_entry_create(newsock, SOCK_ACCEPT, addr); + if (entry == NULL) { + ubcore_log_err("Failed to register accept sock entry"); + k_close(newsock); + } + return entry; +} + +static struct sk_entry * +sk_entry_create_connect(union ubcore_net_addr_union addr) +{ + struct sk_entry *entry; + struct socket *sock; + + sock = k_connect(&addr, ss.port); + if (sock == NULL) { + ubcore_log_err("Failed to create connect socket"); + return NULL; + } + + entry = sk_entry_create(sock, SOCK_CONNECT, addr); + if (entry == NULL) { + ubcore_log_err("Failed to create sock entry"); + k_close(sock); + } + return entry; +} + +static 
struct sk_entry *sk_entry_find_by_addr(union ubcore_net_addr_union addr) +{ + struct sk_entry *cur, *tmp, *entry = NULL; + unsigned long flags; + + spin_lock_irqsave(&ss.lock, flags); + list_for_each_entry_safe(cur, tmp, &ss.all_list, all_list_entry) { + if (cur->sk_type != SOCK_LISTEN && + memcmp(&cur->ub_addr, &addr, sizeof(addr)) == 0) { + entry = cur; + sk_entry_ref_acquire(entry); + break; + } + } + spin_unlock_irqrestore(&ss.lock, flags); + + if (entry != NULL) + return entry; + + entry = sk_entry_create_connect(addr); + if (entry == NULL) { + ubcore_log_err("Failed to create connect sock entry\n"); + return NULL; + } + sk_entry_ref_acquire(entry); + return entry; +} + +static int sk_handle_msg(struct sk_entry *entry) +{ + struct ubcore_net_msg msg = { 0 }; + struct ubcore_device *dev = NULL; + size_t ret; + + ret = k_recvmsg(entry->sock, &msg, MSG_HDR_SIZE, 0); + if (ret != MSG_HDR_SIZE || msg.len > SOCK_MSG_MAX_LEN) { + ubcore_log_err("Failed to recv sock hdr, recv: %zu, " MSG_FMT, + ret, MSG_ARG(&msg)); + return -1; + } + + msg.data = kcalloc(1, msg.len, GFP_KERNEL); + if (IS_ERR_OR_NULL(msg.data)) { + ubcore_log_err("Failed to alloc sock msg data, " MSG_FMT, + MSG_ARG(&msg)); + return -1; + } + + ret = k_recvmsg(entry->sock, msg.data, msg.len, 0); + if (ret != msg.len) { + ubcore_log_err("Failed to recv sock data, recv: %zu, " MSG_FMT, + ret, MSG_ARG(&msg)); + kfree(msg.data); + return -1; + } + + dev = ubcore_find_device((union ubcore_eid *)&entry->ub_addr, + (enum ubcore_transport_type)(msg.cap)); + if (dev == NULL) { + ubcore_log_err( + "Failed to find device when handle sock msg, " MSG_FMT, + MSG_ARG(&msg)); + kfree(msg.data); + return 0; + } + + ubcore_log_info("Handle sock message, " MSG_FMT, MSG_ARG(&msg)); + ubcore_net_handle_msg(dev, &msg, (void *)entry); + + ubcore_put_device(dev); + + kfree(msg.data); + return 0; +} + +static int sk_event_loop(void *data) +{ + wait_queue_entry_t wait; + + init_waitqueue_entry(&wait, current); + + while (!kthread_should_stop()) { + ktime_t expires; + ktime_t time_limit = ktime_set(300, 0); + unsigned long flags; + + spin_lock_irqsave(&ss.lock, flags); + while (!list_empty(&ss.ready_list)) { + int is_sock_inactive = 0; + struct sk_entry *cur; + __poll_t events; + + cur = list_first_entry(&ss.ready_list, struct sk_entry, + ready_list_entry); + events = (*cur->sock->ops->poll)(NULL, cur->sock, + &cur->pt); + + ubcore_log_info("Events occur, %x, addr:" EID_FMT, + events, EID_ARGS(cur->ub_addr)); + if (events & (POLLERR | POLLHUP | POLLRDHUP)) + is_sock_inactive = -1; + else if (events & POLLIN) { + spin_unlock_irqrestore(&ss.lock, flags); + if (cur->sk_type == SOCK_LISTEN) + (void)sk_entry_create_accept(cur->sock); + else + is_sock_inactive = sk_handle_msg(cur); + spin_lock_irqsave(&ss.lock, flags); + } else { + // No remaining data to process. 
+ sk_entry_remove_from_ready_list(cur); + } + // Prevent the socket from being re-added to the ready list + if (is_sock_inactive) { + atomic_set(&cur->inactive, 1); + sk_entry_remove_from_ready_list(cur); + sk_entry_remove_from_all_list(cur); + + // Release sock will invoke sk_wait_callback + spin_unlock_irqrestore(&ss.lock, flags); + sk_entry_ref_release(cur); + spin_lock_irqsave(&ss.lock, flags); + } + } + spin_unlock_irqrestore(&ss.lock, flags); + + if (signal_pending(current)) { + ubcore_log_err("pending signal"); + break; + } + + set_current_state(TASK_INTERRUPTIBLE); + __add_wait_queue_exclusive(&ss.wq, &wait); + expires = ktime_add(ktime_get(), time_limit); + schedule_hrtimeout_range(&expires, 0, HRTIMER_MODE_ABS); + set_current_state(TASK_RUNNING); + __remove_wait_queue(&ss.wq, &wait); + } + + while (!list_empty(&ss.all_list)) { + struct sk_entry *cur; + + cur = list_first_entry(&ss.all_list, struct sk_entry, + all_list_entry); + if (cur->whead) + __remove_wait_queue(cur->whead, &cur->wait); + sk_entry_remove_from_ready_list(cur); + sk_entry_remove_from_all_list(cur); + sk_entry_ref_release(cur); + } + return 0; +} + +static int ubcore_sock_send_inner(struct ubcore_device *dev, + struct ubcore_net_msg *msg, + struct sk_entry *entry) +{ + struct msghdr msghdr = { 0 }; + struct kvec vec[2] = { 0 }; + const int vec_num = 2; + int ret; + + ubcore_log_info("Send sock message, " MSG_FMT, MSG_ARG(msg)); + + msg->cap = dev->transport_type; + msghdr.msg_flags = 0; + vec[0].iov_base = msg; + vec[0].iov_len = MSG_HDR_SIZE; + vec[1].iov_base = msg->data; + vec[1].iov_len = msg->len; + + spin_lock(&entry->sk_lock); + ret = kernel_sendmsg(entry->sock, &msghdr, vec, vec_num, + MSG_HDR_SIZE + msg->len); + spin_unlock(&entry->sk_lock); + + return ret; +} + +int ubcore_sock_send(struct ubcore_device *dev, struct ubcore_net_msg *msg, + void *conn) +{ + struct sk_entry *entry = conn; + + return ubcore_sock_send_inner(dev, msg, entry); +} + +int ubcore_sock_send_to(struct ubcore_device *dev, struct ubcore_net_msg *msg, + union ubcore_eid addr) +{ + struct sk_entry *entry; + int ret; + + entry = sk_entry_find_by_addr( + *((union ubcore_net_addr_union *)(&addr))); + if (entry == NULL) + return -1; + ret = ubcore_sock_send_inner(dev, msg, entry); + sk_entry_ref_release(entry); + return ret; +} + +int ubcore_sock_init(void) +{ + union ubcore_net_addr_union any_addr6 = { 0 }; + + ubcore_log_info("sock service init\n"); + + INIT_LIST_HEAD(&ss.all_list); + INIT_LIST_HEAD(&ss.ready_list); + spin_lock_init(&ss.lock); + ss.port = SOCK_PORT; + init_waitqueue_head(&ss.wq); + + if (sk_entry_create_listen(any_addr6) == NULL) { + ubcore_log_err("Failed to register ipv6 listen sock entry"); + return -1; + } + + ss.daemon = kthread_run(sk_event_loop, NULL, "sk_event_loop"); + if (!ss.daemon) { + ubcore_log_err("sock thread launch failed"); + return -1; + } + return 0; +} + +void ubcore_sock_uninit(void) +{ + ubcore_log_info("sock service uninit\n"); + + if (ss.daemon) { + smp_mb(); + // memory barrier required + if (waitqueue_active(&ss.wq)) + wake_up(&ss.wq); + kthread_stop(ss.daemon); + ss.daemon = NULL; + } +} diff --git a/drivers/ub/urma/ubcore/net/ubcore_sock.h b/drivers/ub/urma/ubcore/net/ubcore_sock.h new file mode 100644 index 000000000000..dba34cfe0421 --- /dev/null +++ b/drivers/ub/urma/ubcore/net/ubcore_sock.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
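ubcore_sock_send_inner() and sk_handle_msg() above fix the TCP framing: the first MSG_HDR_SIZE bytes of struct ubcore_net_msg (everything up to the data pointer) are written first, followed by exactly msg->len payload bytes, and the receiver rejects anything above SOCK_MSG_MAX_LEN. Below is a hedged sketch of a caller that respects that contract; my_notify() and its payload are hypothetical:

static int my_notify(struct ubcore_device *dev, void *conn,
                     const void *payload, uint16_t payload_len)
{
        struct ubcore_net_msg msg = {
                .type = UBCORE_NET_DESTROY_REQ, /* any enum ubcore_net_msg_type value */
                .len = payload_len,             /* the peer re-reads exactly this many bytes */
                .data = (void *)payload,
        };

        /* The 4096-byte cap mirrors SOCK_MSG_MAX_LEN in ubcore_sock.c; larger
         * frames are rejected by sk_handle_msg() on the receiving side.
         */
        if (payload_len > 4096)
                return -EINVAL;

        /* Header kvec + payload kvec leave in a single kernel_sendmsg() call. */
        return ubcore_sock_send(dev, &msg, conn);
}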
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore sock header + * Author: Wang Hang + * Create: 2025-02-18 + * Note: + * History: 2025-02-18: create file + */ + +#ifndef NET_UBCORE_SOCK_H +#define NET_UBCORE_SOCK_H + +#include "ubcore_net.h" +#include + +int ubcore_sock_send(struct ubcore_device *dev, struct ubcore_net_msg *msg, + void *conn); +int ubcore_sock_send_to(struct ubcore_device *dev, struct ubcore_net_msg *msg, + union ubcore_eid addr); + +int ubcore_sock_init(void); +void ubcore_sock_uninit(void); + +#endif diff --git a/drivers/ub/urma/ubcore/ubcm/ub_cm.c b/drivers/ub/urma/ubcore/ubcm/ub_cm.c new file mode 100644 index 000000000000..a0ca45fc6d31 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ub_cm.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ub_cm implementation + * Author: Chen Yutao + * Create: 2025-01-10 + * Note: + * History: 2025-01-10: create file + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "ubcm_log.h" +#include "ubcm_genl.h" +#include "ub_mad.h" +#include "ub_cm.h" + +#define UBCM_LOG_FILE_PERMISSION (0644) + +#define UBCM_MODULE_NAME "ubcm" +#define UBCM_DEVNO_MODE (0666) +#define UBCM_DEVICE_NAME "ubcm" + +module_param(g_ubcm_log_level, uint, UBCM_LOG_FILE_PERMISSION); +MODULE_PARM_DESC(g_ubcm_log_level, " 3: ERR, 4: WARNING, 6: INFO, 7: DEBUG"); + +struct ubcm_device { + struct kref kref; + struct list_head list_node; + struct ubcore_device *device; + struct ubmad_agent *agent; + spinlock_t agent_lock; +}; + +static struct ubcm_context g_ubcm_ctx = { 0 }; +struct ubcm_context *get_ubcm_ctx(void) +{ + return &g_ubcm_ctx; +} + +static int ubcm_open(struct inode *i_node, struct file *filp) +{ + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + return 0; +} + +static int ubcm_close(struct inode *i_node, struct file *filp) +{ + module_put(THIS_MODULE); + return 0; +} + +static const struct file_operations g_ubcm_ops = { + .owner = THIS_MODULE, + .open = ubcm_open, + .release = ubcm_close, + .unlocked_ioctl = NULL, /* ubcm does not support ioctl currently */ + .compat_ioctl = NULL, +}; + +static int ubcm_add_device(struct ubcore_device *device); +static void ubcm_remove_device(struct ubcore_device *device, void *client_ctx); + +static struct ubcore_client g_ubcm_client = { .list_node = LIST_HEAD_INIT( + g_ubcm_client.list_node), + .client_name = UBCM_MODULE_NAME, + .add = ubcm_add_device, + .remove = ubcm_remove_device }; + +static char *ubcm_devnode(const struct device *dev, umode_t *mode) + +{ + if (mode) + *mode = 
UBCM_DEVNO_MODE; + + return kasprintf(GFP_KERNEL, "%s", dev_name(dev)); +} + +static struct class g_ubcm_class = { + .name = UBCM_MODULE_NAME, + .devnode = ubcm_devnode, +}; + +static int ubcm_get_ubc_dev(struct ubcore_device *device) +{ + if (IS_ERR_OR_NULL(device)) { + ubcm_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + atomic_inc(&device->use_cnt); + return 0; +} + +static void ubcm_put_ubc_dev(struct ubcore_device *device) +{ + if (IS_ERR_OR_NULL(device)) { + ubcm_log_err("Invalid parameter.\n"); + return; + } + + if (atomic_dec_and_test(&device->use_cnt)) + complete(&device->comp); +} + +static void ubcm_get_device(struct ubcm_device *cm_dev) +{ + kref_get(&cm_dev->kref); +} + +static void ubcm_kref_release(struct kref *kref) +{ + struct ubcm_device *cm_dev = + container_of(kref, struct ubcm_device, kref); + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + struct ubmad_agent *agent; + + /* Delayed work should be flushed before resource destroy */ + flush_workqueue(cm_ctx->wq); + if (!IS_ERR_OR_NULL(cm_dev->agent)) { + agent = cm_dev->agent; + spin_lock(&cm_dev->agent_lock); + cm_dev->agent = NULL; + spin_unlock(&cm_dev->agent_lock); + (void)ubmad_unregister_agent(agent); + } + + if (!IS_ERR_OR_NULL(cm_dev->device)) { + ubcm_put_ubc_dev(cm_dev->device); + cm_dev->device = NULL; + } + + kfree(cm_dev); +} + +static void ubcm_put_device(struct ubcm_device *cm_dev) +{ + uint32_t refcnt; + + refcnt = kref_read(&cm_dev->kref); + ubcm_log_info("ubcm kref put, old refcnt: %u, new refcnt: %u.\n", + refcnt, refcnt > 0 ? refcnt - 1 : 0); + + kref_put(&cm_dev->kref, ubcm_kref_release); +} + +static int ubcm_send_handler(struct ubmad_agent *agent, + struct ubmad_send_cr *send_cr) +{ + /* Note: agent & send_buf cannot be NULL, no need to check */ + if (IS_ERR_OR_NULL(send_cr->cr)) { + ubcm_log_err("Invalid parameter.\n"); + return -EINVAL; + } + if (send_cr->cr->status != UBCORE_CR_SUCCESS) { + ubcm_log_err("Cr status error: %d.\n", + (int)send_cr->cr->status); + return -EINVAL; + } + + return 0; +} + +static int ubcm_recv_handler(struct ubmad_agent *agent, + struct ubmad_recv_cr *recv_cr) +{ + /* Note: agent & recv_buf cannot be NULL, no need to check */ + struct ubcm_uvs_genl_node *uvs; + struct ubcm_nlmsg *nlmsg; + int ret; + + switch (recv_cr->msg_type) { + case UBMAD_CONN_DATA: + nlmsg = ubcm_alloc_genl_msg(recv_cr); + break; + case UBMAD_UBC_CONN_DATA: + ret = ubcore_cm_recv(agent->device, + (struct ubcore_cm_recv_cr *)recv_cr); + if (ret != 0) + ubcm_log_err( + "Failed to handle message by ubcore net, ret: %d.\n", + ret); + return ret; + case UBMAD_AUTHN_DATA: + nlmsg = ubcm_alloc_genl_authn_msg(recv_cr); + break; + default: + ubcm_log_err("Invalid msg_type: %u.\n", recv_cr->msg_type); + return -EINVAL; + } + + if (IS_ERR_OR_NULL(nlmsg)) + return -ENOMEM; + + uvs = ubcm_find_get_uvs_by_eid(&nlmsg->dst_eid); + if (uvs == NULL) { + ret = -1; + goto free_nlmsg; + } + + ret = ubcm_genl_unicast(nlmsg, ubcm_nlmsg_len(nlmsg), uvs); + if (ret != 0) + ubcm_log_err("Failed to send genl msg.\n"); + ubcm_uvs_kref_put(uvs); +free_nlmsg: + kfree(nlmsg); + return ret; +} + +static int ubcm_add_device(struct ubcore_device *device) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + struct ubcm_device *cm_dev; + int ret; + + cm_dev = kzalloc(sizeof(struct ubcm_device), GFP_KERNEL); + if (cm_dev == NULL) + return -ENOMEM; + + kref_init(&cm_dev->kref); + spin_lock_init(&cm_dev->agent_lock); + ret = ubcm_get_ubc_dev(device); + if (ret != 0) + goto put_dev; + cm_dev->device = device; + 
ubcore_set_client_ctx_data(device, &g_ubcm_client, cm_dev); + + /* Currently no send_handler needed */ + cm_dev->agent = ubmad_register_agent(device, ubcm_send_handler, + ubcm_recv_handler, (void *)cm_dev); + if (IS_ERR_OR_NULL(cm_dev->agent)) { + ubcm_log_err("Failed to register mad agent.\n"); + ret = PTR_ERR(cm_dev->agent); + goto put_dev; + } + + spin_lock(&cm_ctx->device_lock); + list_add_tail(&cm_dev->list_node, &cm_ctx->device_list); + spin_unlock(&cm_ctx->device_lock); + + return 0; +put_dev: + /* Note: cm_dev will free next */ + ubcm_put_device(cm_dev); + return ret; +} + +static void ubcm_remove_device(struct ubcore_device *device, void *client_ctx) +{ + struct ubcm_device *cm_dev = (struct ubcm_device *)client_ctx; + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + + if (cm_dev->device != device) { + ubcm_log_err("Invalid parameter.\n"); + return; + } + spin_lock(&cm_ctx->device_lock); + list_del(&cm_dev->list_node); + spin_unlock(&cm_ctx->device_lock); + + ubcm_put_device(cm_dev); +} + +void ubcm_work_handler(struct work_struct *work) +{ + struct ubcm_work *cm_work = container_of(work, struct ubcm_work, work); + struct ubmad_send_buf *send_buf = cm_work->send_buf; + struct ubmad_send_buf *bad_send_buf; + struct ubcm_device *cm_dev; + int ret; + + if (IS_ERR_OR_NULL(send_buf)) { + ubcm_log_err("Invalid parameter.\n"); + goto free_work; + } + + cm_dev = ubcm_find_get_device(&send_buf->src_eid); + if (IS_ERR_OR_NULL(cm_dev) || IS_ERR_OR_NULL(cm_dev->device)) { + ubcm_log_err("Failed to find ubcm device, src_eid: " EID_FMT + ".\n", + EID_ARGS(send_buf->src_eid)); + goto free_send_buf; + } + /* Source eid should be default eid0 for wk_jetty */ + send_buf->src_eid = cm_dev->device->eid_table.eid_entries[0].eid; + + ret = ubmad_post_send(cm_dev->device, send_buf, &bad_send_buf); + if (ret != 0) + ubcm_log_err("Failed to post send mad, ret: %d.\n", ret); + ubcm_put_device(cm_dev); + +free_send_buf: + kfree(send_buf); +free_work: + kfree(cm_work); +} + +static int ubcm_base_init(void) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + int ret; + + INIT_LIST_HEAD(&cm_ctx->device_list); + spin_lock_init(&cm_ctx->device_lock); + + cm_ctx->wq = alloc_workqueue(UBCM_MODULE_NAME, 0, 1); + if (IS_ERR_OR_NULL(cm_ctx->wq)) { + ubcm_log_err("Failed to alloc ubcm workqueue.\n"); + return -ENOMEM; + } + + ret = ubcore_register_client(&g_ubcm_client); + if (ret != 0) { + ubcm_log_err("Failed to register ubcm client, ret: %d.\n", ret); + destroy_workqueue(cm_ctx->wq); + cm_ctx->wq = NULL; + } + + return ret; +} + +static void ubcm_base_uninit(void) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + struct ubcm_device *cm_dev, *next; + + ubcore_unregister_client(&g_ubcm_client); + destroy_workqueue(cm_ctx->wq); + cm_ctx->wq = NULL; + + spin_lock(&cm_ctx->device_lock); + list_for_each_entry_safe(cm_dev, next, &cm_ctx->device_list, + list_node) { + list_del(&cm_dev->list_node); + ubcm_put_device(cm_dev); + } + spin_unlock(&cm_ctx->device_lock); +} + +struct ubcm_device *ubcm_find_get_device(union ubcore_eid *eid) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + struct ubcm_device *cm_dev, *next, *target = NULL; + struct ubcore_device *dev; + uint32_t idx; + + spin_lock(&cm_ctx->device_lock); + list_for_each_entry_safe(cm_dev, next, &cm_ctx->device_list, + list_node) { + dev = cm_dev->device; + spin_lock(&dev->eid_table.lock); + if (IS_ERR_OR_NULL(dev->eid_table.eid_entries)) { + spin_unlock(&dev->eid_table.lock); + continue; + } + for (idx = 0; idx < dev->attr.dev_cap.max_eid_cnt; idx++) { + if 
(memcmp(&dev->eid_table.eid_entries[idx].eid, eid, + sizeof(union ubcore_eid)) == 0) { + target = cm_dev; + (void)ubcm_get_device(target); + break; + } + } + spin_unlock(&dev->eid_table.lock); + if (target != NULL) + break; + } + spin_unlock(&cm_ctx->device_lock); + + return target; +} + +static int ubcm_cdev_create(void) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + int ret; + + ret = alloc_chrdev_region(&cm_ctx->ubcm_devno, 0, 1, UBCM_MODULE_NAME); + if (ret != 0) { + ubcm_log_err("Failed to alloc chrdev region, ret: %d.\n", ret); + return ret; + } + + /* create /sys/class/ubcm */ + ret = class_register(&g_ubcm_class); + if (ret != 0) { + ubcm_log_err("Failed to register ubcm class, ret: %d.\n", ret); + goto unreg_devno; + } + + cdev_init(&cm_ctx->ubcm_cdev, &g_ubcm_ops); + cm_ctx->ubcm_cdev.owner = THIS_MODULE; + + ret = cdev_add(&cm_ctx->ubcm_cdev, cm_ctx->ubcm_devno, 1); + if (ret != 0) { + ubcm_log_err("Failed to add ubcm chrdev, ret: %d.\n", ret); + goto unreg_class; + } + + /* create /dev/ubcm */ + cm_ctx->ubcm_dev = device_create(&g_ubcm_class, NULL, + cm_ctx->ubcm_devno, NULL, + UBCM_DEVICE_NAME); + if (IS_ERR_OR_NULL(cm_ctx->ubcm_dev)) { + ret = -1; + ubcm_log_err("Failed to create ubcm device, ret: %d.\n", + (int)PTR_ERR(cm_ctx->ubcm_dev)); + cm_ctx->ubcm_dev = NULL; + goto del_cdev; + } + + ubcm_log_info("Finish to create ubcm chrdev.\n"); + return 0; +del_cdev: + cdev_del(&cm_ctx->ubcm_cdev); +unreg_class: + class_unregister(&g_ubcm_class); +unreg_devno: + unregister_chrdev_region(cm_ctx->ubcm_devno, 1); + return ret; +} + +static void ubcm_cdev_destroy(void) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + + device_destroy(&g_ubcm_class, cm_ctx->ubcm_cdev.dev); + cm_ctx->ubcm_dev = NULL; + cdev_del(&cm_ctx->ubcm_cdev); + class_unregister(&g_ubcm_class); + unregister_chrdev_region(cm_ctx->ubcm_devno, 1); +} + +int ubcm_init(void) +{ + int ret; + + ret = ubmad_init(); + if (ret != 0) { + ubcm_log_err("Failed to init ub_mad, ret: %d.\n", ret); + return ret; + } + + ret = ubcm_base_init(); + if (ret != 0) { + ubcm_log_err("Failed to init ubcm base, ret: %d.\n", ret); + goto uninit_mad; + } + + ret = ubcm_cdev_create(); + if (ret != 0) { + ubcm_log_err("Failed to create ubcm chrdev, ret: %d.\n", ret); + goto uninit_base; + } + + ret = ubcm_genl_init(); + if (ret != 0) { + ubcm_log_err("Failed to init ubcm generic netlink, ret: %d.\n", + ret); + goto destroy_cdev; + } + ubcore_register_cm_send_ops(ubmad_ubc_send); + + pr_info("ubcm module init success.\n"); + return 0; +destroy_cdev: + ubcm_cdev_destroy(); +uninit_base: + ubcm_base_uninit(); +uninit_mad: + ubmad_uninit(); + return ret; +} + +void ubcm_uninit(void) +{ + ubcm_genl_uninit(); + ubcm_cdev_destroy(); + ubcm_base_uninit(); + ubmad_uninit(); + pr_info("ubcm module exits.\n"); +} diff --git a/drivers/ub/urma/ubcore/ubcm/ub_cm.h b/drivers/ub/urma/ubcore/ubcm/ub_cm.h new file mode 100644 index 000000000000..0b74da0900c0 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ub_cm.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + * + * Description: ub_cm header + * Author: Chen Yutao + * Create: 2025-01-20 + * Note: + * History: 2025-01-20: Create file + */ + +#ifndef UB_CM_H +#define UB_CM_H + +#include +#include + +#include +#include "net/ubcore_cm.h" + +#include "ub_mad.h" +#include "ubcm_genl.h" + +struct ubcm_context { + struct list_head device_list; + spinlock_t device_lock; + struct workqueue_struct *wq; + dev_t ubcm_devno; + struct cdev ubcm_cdev; + struct device *ubcm_dev; +}; + +struct ubcm_work { + struct work_struct work; + struct ubmad_send_buf *send_buf; +}; + +struct ubcm_context *get_ubcm_ctx(void); + +/* Note: kref will increase of ubcm_device in this operation */ +struct ubcm_device *ubcm_find_get_device(union ubcore_eid *eid); + +void ubcm_work_handler(struct work_struct *work); + +int ubcm_init(void); +void ubcm_uninit(void); + +#endif /* UB_CM_H */ diff --git a/drivers/ub/urma/ubcore/ubcm/ub_mad.c b/drivers/ub/urma/ubcore/ubcm/ub_mad.c new file mode 100644 index 000000000000..a675d7cf389a --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ub_mad.c @@ -0,0 +1,1277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ub_mad implementation + * Author: Chen Yutao + * Create: 2025-01-10 + * Note: + * History: 2025-01-10: create file + */ + +#include +#include + +#include +#include +#include "ubcore_tp.h" +#include "ubcm_log.h" + +#include "ub_mad_priv.h" + +// udma jetty id starts from 1 currently +#define WK_JETTY_ID_INITIALIZER \ + { \ + UBMAD_WK_JETTY_ID_0, UBMAD_WK_JETTY_ID_1 \ + } +static const uint32_t g_ubmad_wk_jetty_id[UBMAD_WK_JETTY_NUM] = + WK_JETTY_ID_INITIALIZER; + +static struct list_head g_ubmad_device_list; +static DEFINE_SPINLOCK(g_ubmad_device_list_lock); + +static struct list_head g_ubmad_agent_list; +static DEFINE_SPINLOCK(g_ubmad_agent_list_lock); +static DEFINE_MUTEX(g_ubc_eid_lock); + +/* forward declaration */ +// device +static int +ubmad_create_device_priv_resources(struct ubmad_device_priv *dev_priv); +static void +ubmad_destroy_device_priv_resources(struct ubmad_device_priv *dev_priv); +static struct ubmad_device_priv * +ubmad_get_device_priv_lockless(struct ubcore_device *device); + +/* common */ +struct ubmad_bitmap *ubmad_create_bitmap(uint32_t bitmap_size) +{ + struct ubmad_bitmap *bitmap; + + bitmap = kcalloc(1, sizeof(struct ubmad_bitmap), GFP_KERNEL); + if (IS_ERR_OR_NULL(bitmap)) + return ERR_PTR(-ENOMEM); + bitmap->size = bitmap_size; + bitmap->bits = kcalloc(BITS_TO_LONGS(bitmap_size), + sizeof(unsigned long), GFP_KERNEL); + if (IS_ERR_OR_NULL(bitmap->bits)) { + kfree(bitmap); + return ERR_PTR(-ENOMEM); + } + spin_lock_init(&bitmap->lock); + return bitmap; +} + +void ubmad_destroy_bitmap(struct ubmad_bitmap *bitmap) +{ + if (bitmap->bits != NULL) + kfree(bitmap->bits); + kfree(bitmap); +} + +uint32_t ubmad_bitmap_get_id(struct ubmad_bitmap *bitmap) +{ + uint32_t id; + + spin_lock(&bitmap->lock); + id = find_first_zero_bit(bitmap->bits, bitmap->size); + if (id >= 
bitmap->size) { + spin_unlock(&bitmap->lock); + ubcm_log_err("bitmap find zero bit failed\n"); + return id; + } + set_bit(id, bitmap->bits); + spin_unlock(&bitmap->lock); + return id; +} + +int ubmad_bitmap_put_id(struct ubmad_bitmap *bitmap, uint32_t id) +{ + spin_lock(&bitmap->lock); + if (id >= bitmap->size) { + spin_unlock(&bitmap->lock); + ubcm_log_err("invalid id %u\n", id); + return -EINVAL; + } + clear_bit(id, bitmap->bits); + spin_unlock(&bitmap->lock); + return 0; +} + +bool ubmad_bitmap_test_id(struct ubmad_bitmap *bitmap, uint32_t id) +{ + bool result; + + spin_lock(&bitmap->lock); + if (id >= bitmap->size) { + spin_unlock(&bitmap->lock); + ubcm_log_err("invalid id %u\n", id); + return false; + } + result = test_bit(id, bitmap->bits) == 0; + set_bit(id, bitmap->bits); + spin_unlock(&bitmap->lock); + + return result; +} + +int ubmad_bitmap_set_id(struct ubmad_bitmap *bitmap, uint32_t id) +{ + spin_lock(&bitmap->lock); + if (id >= bitmap->size) { + spin_unlock(&bitmap->lock); + ubcm_log_err("invalid id %u\n", id); + return -1; + } + + set_bit(id, bitmap->bits); + spin_unlock(&bitmap->lock); + return 0; +} + +/* ubcore event ops */ +// re-create dev resources when add eid after open dev +static void ubmad_event_cb(struct ubcore_event *event, + struct ubcore_event_handler *handler) +{ + switch (event->event_type) { + case UBCORE_EVENT_EID_CHANGE: + ubcm_log_info("No need to handle eid event.\n"); + break; + default: + ubcm_log_err("Invalid event_type: %d, dev_name: %s.\n", + event->event_type, event->ub_dev->dev_name); + break; + } +} + +static int ubmad_check_eid_in_dev(struct ubcore_device *dev, + struct ubcore_eid_info *eid_info) +{ + int i; + + spin_lock(&dev->eid_table.lock); + for (i = 0; i < dev->eid_table.eid_cnt; i++) { + if (memcmp(&dev->eid_table.eid_entries[i].eid, &eid_info->eid, + sizeof(union ubcore_eid)) == 0 && + dev->eid_table.eid_entries[i].eid_index == + eid_info->eid_index) { + spin_unlock(&dev->eid_table.lock); + return 0; + } + } + spin_unlock(&dev->eid_table.lock); + return -1; +} + +static int +ubmad_update_device_priv_resources(struct ubmad_device_priv *dev_priv, + struct ubcore_eid_info *eid_info) +{ + int ret; + + if (memcmp(&dev_priv->eid_info.eid, &eid_info->eid, + sizeof(union ubcore_eid)) == 0 && + dev_priv->eid_info.eid_index == eid_info->eid_index) { + ubcm_log_warn( + "eid_info is not changed, no need to update rsrc\n"); + return 0; + } + ubmad_destroy_device_priv_resources(dev_priv); + dev_priv->has_create_jetty_rsrc = false; + + (void)memcpy(&dev_priv->eid_info.eid, &eid_info->eid, + sizeof(union ubcore_eid)); + dev_priv->eid_info.eid_index = eid_info->eid_index; + ret = ubmad_create_device_priv_resources(dev_priv); + if (ret != 0) { + ubcm_log_err("Failed to create device resources, ret: %d.\n", + ret); + return ret; + } + dev_priv->has_create_jetty_rsrc = true; + ubcm_log_info( + "Success to update priv resources: dev: %s eid_idx %d, " EID_FMT, + dev_priv->device->dev_name, dev_priv->eid_info.eid_index, + EID_ARGS(dev_priv->eid_info.eid)); + return 0; +} + +static int ubmad_ubc_eid_ops_inner(struct ubcore_device *dev, + struct ubcore_eid_info *eid_info, + enum ubcore_mgmt_event_type event_type) +{ + struct ubmad_device_priv *dev_priv; + unsigned long flag; + int ret; + + spin_lock_irqsave(&g_ubmad_device_list_lock, flag); + dev_priv = ubmad_get_device_priv_lockless(dev); + spin_unlock_irqrestore(&g_ubmad_device_list_lock, flag); + if (IS_ERR_OR_NULL(dev_priv)) { + ubcm_log_err("Failed to get dev_priv, dev_name: %s\n", + dev->dev_name); + 
return -1; + } + switch (event_type) { + case UBCORE_MGMT_EVENT_EID_ADD: + if (dev_priv->has_create_jetty_rsrc) { + ret = ubmad_update_device_priv_resources(dev_priv, + eid_info); + if (ret != 0) + ubcm_log_err( + "Failed to update device resources, ret: %d.\n", + ret); + } else { + (void)memcpy(&dev_priv->eid_info.eid, &eid_info->eid, + sizeof(union ubcore_eid)); + dev_priv->eid_info.eid_index = eid_info->eid_index; + ret = ubmad_create_device_priv_resources(dev_priv); + if (ret != 0) + ubcm_log_err( + "Failed to create device resources, ret: %d.\n", + ret); + else + dev_priv->has_create_jetty_rsrc = true; + } + break; + case UBCORE_MGMT_EVENT_EID_RMV: + ubmad_destroy_device_priv_resources(dev_priv); + dev_priv->has_create_jetty_rsrc = false; + ret = 0; + break; + default: + ubcm_log_err("Invalid event_type: %d.\n", event_type); + ubmad_put_device_priv(dev_priv); + return -EINVAL; + } + ubcm_log_info("Finish to handle new eid, ret: %d, event_type: %d.\n", + ret, (int)event_type); + ubmad_put_device_priv(dev_priv); + return ret; +} + +// re-create dev resources when dispatching a management event +static int ubmad_ubc_eid_ops(struct ubcore_device *dev, + struct ubcore_eid_info *eid_info, + enum ubcore_mgmt_event_type event_type) +{ + int ret; + + mutex_lock(&g_ubc_eid_lock); + if (ubmad_check_eid_in_dev(dev, eid_info) != 0) { + mutex_unlock(&g_ubc_eid_lock); + ubcm_log_err("Eid is not in dev, dev_name: %s, eid: " EID_FMT + ", eid_index: %u.\n", + dev->dev_name, EID_ARGS(eid_info->eid), + eid_info->eid_index); + return -1; + } + + ret = ubmad_ubc_eid_ops_inner(dev, eid_info, event_type); + if (ret != 0) { + mutex_unlock(&g_ubc_eid_lock); + ubcm_log_err("Failed to handle eid ops, ret: %d.\n", + ret); + return ret; + } + mutex_unlock(&g_ubc_eid_lock); + return 0; +} + +/* jetty ops */ +static struct ubcore_jfc *ubmad_create_jfc_s(struct ubcore_device *device) +{ + struct ubcore_jfc_cfg jfc_cfg = { 0 }; + struct ubcore_jfc *jfc = NULL; + int rearm_ret; + + jfc_cfg.depth = UBMAD_JFS_DEPTH; + jfc = ubcore_create_jfc(device, &jfc_cfg, ubmad_jfce_handler_s, NULL, + NULL); + if (IS_ERR_OR_NULL(jfc)) { + ubcm_log_err("create jfc_s failed\n"); + return jfc; + } + + rearm_ret = ubcore_rearm_jfc(jfc, false); + if (rearm_ret != 0) { + ubcm_log_err("rearm jfc_s failed. ret %d\n", rearm_ret); + return NULL; + } + + return jfc; +} + +static struct ubcore_jfc *ubmad_create_jfc_r(struct ubcore_device *device) +{ + struct ubcore_jfc_cfg jfc_cfg = { 0 }; + struct ubcore_jfc *jfc = NULL; + int rearm_ret; + + jfc_cfg.depth = UBMAD_JFR_DEPTH; + jfc = ubcore_create_jfc(device, &jfc_cfg, ubmad_jfce_handler_r, NULL, + NULL); + if (IS_ERR_OR_NULL(jfc)) { + ubcm_log_err("create jfc_r failed\n"); + return jfc; + } + + rearm_ret = ubcore_rearm_jfc(jfc, false); + if (rearm_ret != 0) { + ubcm_log_err("rearm jfc_r failed. 
ret %d\n", rearm_ret); + return NULL; + } + + return jfc; +} + +static struct ubcore_jfr *ubmad_create_jfr(struct ubmad_device_priv *dev_priv, + struct ubcore_jfc *jfc_r) +{ + struct ubcore_jfr_cfg jfr_cfg = { 0 }; + + jfr_cfg.id = 0U; + jfr_cfg.depth = UBMAD_JFR_DEPTH; + jfr_cfg.flag.bs.token_policy = UBCORE_TOKEN_NONE; + jfr_cfg.trans_mode = UBCORE_TP_RM; + jfr_cfg.eid_index = dev_priv->eid_info.eid_index; + jfr_cfg.max_sge = UBMAD_JFR_MAX_SGE_NUM; + jfr_cfg.jfc = jfc_r; + + return ubcore_create_jfr(dev_priv->device, &jfr_cfg, NULL, NULL); +} + +static struct ubcore_jetty * +ubmad_create_jetty(struct ubmad_device_priv *dev_priv, struct ubcore_jfc *jfc_s, + struct ubcore_jfc *jfc_r, struct ubcore_jfr *jfr, + uint32_t jetty_id) +{ + struct ubcore_jetty_cfg jetty_cfg = { 0 }; + + jetty_cfg.id = jetty_id; + jetty_cfg.flag.bs.share_jfr = 1; + jetty_cfg.trans_mode = UBCORE_TP_RM; + jetty_cfg.eid_index = dev_priv->eid_info.eid_index; + jetty_cfg.jfs_depth = UBMAD_JFS_DEPTH; + jetty_cfg.priority = 0; /* Highest priority */ + jetty_cfg.max_send_sge = UBMAD_JFS_MAX_SGE_NUM; + jetty_cfg.max_send_rsge = UBMAD_JFS_MAX_SGE_NUM; + jetty_cfg.jfr_depth = UBMAD_JFR_DEPTH; + jetty_cfg.max_recv_sge = UBMAD_JFR_MAX_SGE_NUM; + jetty_cfg.send_jfc = jfc_s; + jetty_cfg.recv_jfc = jfc_r; + jetty_cfg.jfr = jfr; + jetty_cfg.err_timeout = UBMAD_JETTY_ERR_TIMEOUT; + + return ubcore_create_jetty(dev_priv->device, &jetty_cfg, NULL, NULL); +} + +static struct ubmad_tjetty * +ubmad_get_tjetty_lockless(struct ubmad_jetty_resource *rsrc, uint32_t hash, + union ubcore_eid *dst_eid) +{ + struct ubmad_tjetty *tjetty; + struct hlist_node *next; + + hlist_for_each_entry_safe(tjetty, next, &rsrc->tjetty_hlist[hash], + node) { + if (memcmp(&tjetty->tjetty->cfg.id.eid, dst_eid, + sizeof(union ubcore_eid)) == 0) { + kref_get(&tjetty->kref); + return tjetty; + } + } + return NULL; +} + +struct ubmad_tjetty *ubmad_get_tjetty(union ubcore_eid *dst_eid, + struct ubmad_jetty_resource *rsrc) +{ + unsigned long flag; + uint32_t hash = jhash(dst_eid, sizeof(union ubcore_eid), 0) % + UBMAD_MAX_TJETTY_NUM; + struct ubmad_tjetty *tjetty = NULL; + + spin_lock_irqsave(&rsrc->tjetty_hlist_lock, flag); + tjetty = ubmad_get_tjetty_lockless(rsrc, hash, dst_eid); + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); + + return tjetty; +} + +static void ubmad_release_tjetty(struct kref *kref) +{ + struct ubmad_tjetty *tjetty = + container_of(kref, struct ubmad_tjetty, kref); + int ret; + + ubmad_uninit_msn_mgr(&tjetty->msn_mgr); + + ret = ubcore_unimport_jetty(tjetty->tjetty); + if (ret != 0) + ubcore_log_err("Failed to unimport jetty, ret: %d.\n", ret); + kfree(tjetty); +} + +void ubmad_put_tjetty(struct ubmad_tjetty *tjetty) +{ + kref_put(&tjetty->kref, ubmad_release_tjetty); +} + +static int ubmad_fill_get_tp_cfg(struct ubcore_device *dev, + struct ubcore_get_tp_cfg *get_tp_cfg, + struct ubcore_tjetty_cfg *cfg) +{ + uint32_t eid_index = cfg->eid_index; + + get_tp_cfg->flag.bs.ctp = 1; + get_tp_cfg->flag.bs.rtp = 0; + get_tp_cfg->flag.bs.utp = 0; + + get_tp_cfg->trans_mode = cfg->trans_mode; + + spin_lock(&dev->eid_table.lock); + if (eid_index >= dev->eid_table.eid_cnt || + dev->eid_table.eid_entries == NULL || + dev->eid_table.eid_entries[eid_index].valid == false) { + spin_unlock(&dev->eid_table.lock); + ubcore_log_err("Invalid parameter, eid_index: %u.\n", + eid_index); + return -EINVAL; + } + /* Need to adapt bonding primary eid */ + get_tp_cfg->local_eid = dev->eid_table.eid_entries[eid_index].eid; + spin_unlock(&dev->eid_table.lock); + 
get_tp_cfg->peer_eid = cfg->id.eid; + + return 0; +} + +static struct ubcore_tjetty * +ubmad_import_jetty_compat(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_active_tp_cfg active_tp_cfg = { 0 }; + struct ubcore_get_tp_cfg get_tp_cfg = { 0 }; + struct ubcore_tp_info tp_list = { 0 }; + struct ubcore_tjetty *tjetty = NULL; + uint32_t tp_cnt = 1; + int ret; + + if (!ubcore_have_tp_ctrlplane_ops(dev) || + dev->ops->unimport_jfr == NULL || cfg == NULL || + dev->attr.dev_cap.max_eid_cnt <= cfg->eid_index) + return ERR_PTR(-EINVAL); + + if (ubmad_fill_get_tp_cfg(dev, &get_tp_cfg, cfg) != 0) + return NULL; + + ret = ubcore_get_tp_list(dev, &get_tp_cfg, &tp_cnt, &tp_list, NULL); + if (ret != 0 || tp_cnt != 1) { + ubcore_log_err("Failed to get tp list, ret: %d, tp_cnt: %u.\n", + ret, tp_cnt); + return NULL; + } + active_tp_cfg.tp_handle = tp_list.tp_handle; + ubcore_log_info("Finish to get tp, tpid: %u, tp_cnt: %u, leid: " EID_FMT + ", deid: " EID_FMT ".\n", + (uint32_t)tp_list.tp_handle.bs.tpid, + (uint32_t)tp_list.tp_handle.bs.tp_cnt, + EID_ARGS(get_tp_cfg.local_eid), + EID_ARGS(get_tp_cfg.peer_eid)); + + tjetty = ubcore_import_jetty_ex(dev, cfg, &active_tp_cfg, udata); + if (IS_ERR_OR_NULL(tjetty)) + ubcore_log_err("Failed to import jetty ex.\n"); + + return tjetty; +} + +/* need to put twice to release tjetty. + * First put for kref_get() is called by user after finish using tjetty locally. + * Second put for kref_init() is in ubmad_unimport_jetty(). + */ +struct ubmad_tjetty *ubmad_import_jetty(struct ubcore_device *device, + struct ubmad_jetty_resource *rsrc, + union ubcore_eid *dst_eid) +{ + unsigned long flag; + uint32_t hash = jhash(dst_eid, sizeof(union ubcore_eid), 0) % + UBMAD_MAX_TJETTY_NUM; + struct ubmad_tjetty *tjetty = NULL, *new_tjetty = NULL; + struct ubcore_tjetty *new_target = NULL; + struct ubcore_tjetty_cfg tjetty_cfg = { 0 }; + + /* get first */ + spin_lock_irqsave(&rsrc->tjetty_hlist_lock, flag); + tjetty = ubmad_get_tjetty_lockless(rsrc, hash, dst_eid); // put by user + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); + if (!IS_ERR_OR_NULL(tjetty)) { + ubcm_log_info("tjetty0 already imported. eid " EID_FMT "\n", + EID_ARGS(*dst_eid)); + return tjetty; + } + + /* not exist, import then */ + new_tjetty = kzalloc(sizeof(struct ubmad_tjetty), GFP_KERNEL); + if (IS_ERR_OR_NULL(new_tjetty)) + return ERR_PTR(-ENOMEM); + kref_init(&new_tjetty->kref); // put in ubmad_unimport_jetty() + + tjetty_cfg.id.id = rsrc->jetty_id; + tjetty_cfg.id.eid = *dst_eid; + tjetty_cfg.flag.bs.token_policy = UBCORE_TOKEN_NONE; + tjetty_cfg.trans_mode = UBCORE_TP_RM; + tjetty_cfg.type = UBCORE_JETTY; + tjetty_cfg.eid_index = rsrc->jetty->jetty_cfg.eid_index; + new_target = ubmad_import_jetty_compat(device, &tjetty_cfg, NULL); + if (IS_ERR_OR_NULL(new_target)) { + ubcm_log_err("import tjetty: %u failed. eid " EID_FMT "\n", + rsrc->jetty_id, EID_ARGS(*dst_eid)); + goto free; + } + new_tjetty->tjetty = new_target; + + ubmad_init_msn_mgr(&new_tjetty->msn_mgr); + + spin_lock_irqsave(&rsrc->tjetty_hlist_lock, flag); + /* get again in case of concurrence */ + tjetty = ubmad_get_tjetty_lockless(rsrc, hash, dst_eid); // put by user + if (!IS_ERR_OR_NULL(tjetty)) { + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); + ubcm_log_info( + "tjetty0 already imported. 
dev_name: %s, deid " EID_FMT + ".\n", + device->dev_name, EID_ARGS(*dst_eid)); + goto uninit_msn_mgr; + } + + /* still not exist, use new_tjetty */ + // kref_get for new imported tjetty to unify put logics with tjetty got from hlist + kref_get(&new_tjetty->kref); + + // add to hlist + INIT_HLIST_NODE(&new_tjetty->node); + hlist_add_head(&new_tjetty->node, &rsrc->tjetty_hlist[hash]); + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); + + ubcm_log_info( + "import tjetty0 and add to hlist succeeded. dev_name: %s, deid " EID_FMT + ".\n", + device->dev_name, EID_ARGS(*dst_eid)); + return new_tjetty; + +uninit_msn_mgr: + ubmad_uninit_msn_mgr(&new_tjetty->msn_mgr); + ubcore_unimport_jetty(new_target); +free: + kfree(new_tjetty); + return tjetty; +} + +static void ubmad_unimport_jetty(struct ubmad_tjetty *tjetty) +{ + ubmad_put_tjetty(tjetty); // second put for ubmad_import_jetty() +} + +void ubmad_remove_tjetty(union ubcore_eid *seid, + struct ubmad_jetty_resource *rsrc) +{ + uint32_t hash = + jhash(seid, sizeof(union ubcore_eid), 0) % UBMAD_MAX_TJETTY_NUM; + struct ubmad_tjetty *tjetty; + struct hlist_node *next; + unsigned long flag; + + ubcm_log_info("Remove tjetty, leid: " EID_FMT ", reid: " EID_FMT ".\n", + EID_ARGS(rsrc->jetty->jetty_id.eid), EID_ARGS(*seid)); + spin_lock_irqsave(&rsrc->tjetty_hlist_lock, flag); + hlist_for_each_entry_safe(tjetty, next, &rsrc->tjetty_hlist[hash], + node) { + if (memcmp(&tjetty->tjetty->cfg.id.eid, seid, + sizeof(union ubcore_eid)) == 0) { + hlist_del(&tjetty->node); + ubmad_unimport_jetty(tjetty); + } + } + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); +} + +/* seg ops */ +static struct ubcore_target_seg *ubmad_register_seg(struct ubcore_device *dev, + uint32_t num_sge) +{ + void *seg_va = NULL; + union ubcore_reg_seg_flag flag = { 0 }; + uint64_t seg_len = UBMAD_SGE_MAX_LEN * num_sge; + struct ubcore_seg_cfg cfg = { 0 }; + struct ubcore_target_seg *ret; + + seg_va = kzalloc(seg_len, GFP_KERNEL); + if (IS_ERR_OR_NULL(seg_va)) + return ERR_PTR(-ENOMEM); + flag.bs.token_policy = UBCORE_TOKEN_NONE; + flag.bs.cacheable = UBCORE_NON_CACHEABLE; + flag.bs.access = UBCORE_ACCESS_LOCAL_ONLY; + cfg.va = (uint64_t)seg_va; + cfg.len = seg_len; + cfg.flag = flag; + + ret = ubcore_register_seg(dev, &cfg, NULL); + if (IS_ERR_OR_NULL(ret)) { + ubcm_log_err("reg seg failed\n"); + goto free; + } + ubcm_log_info("Finish to register seg, va: 0x%llx, len: %llu", cfg.va, + seg_len); + return ret; + +free: + kfree(seg_va); + return ret; +} + +static void ubmad_unregister_seg(struct ubcore_target_seg *seg) +{ + uint64_t va = seg->seg.ubva.va; + + (void)ubcore_unregister_seg(seg); + kfree((void *)va); +} + +static int ubmad_create_seg(struct ubmad_jetty_resource *rsrc, + struct ubcore_device *device) +{ + // send_seg + rsrc->send_seg = ubmad_register_seg(device, UBMAD_SEND_SGE_NUM); + if (IS_ERR_OR_NULL(rsrc->send_seg)) { + ubcm_log_err("register send_seg failed.\n"); + return -1; + } + rsrc->send_seg_bitmap = ubmad_create_bitmap(UBMAD_SEND_SGE_NUM); + if (IS_ERR_OR_NULL(rsrc->send_seg_bitmap)) { + ubcm_log_err("alloc send_seg_bitmap failed\n"); + goto unreg_send_seg; + } + + // recv_seg + rsrc->recv_seg = ubmad_register_seg(device, UBMAD_RECV_SGE_NUM); + if (IS_ERR_OR_NULL(rsrc->recv_seg)) { + ubcm_log_err("register recv_seg failed\n"); + goto free_send_seg_bitmap; + } + rsrc->recv_seg_bitmap = ubmad_create_bitmap(UBMAD_RECV_SGE_NUM); + if (IS_ERR_OR_NULL(rsrc->recv_seg_bitmap)) { + ubcm_log_err("alloc recv_seg_bitmap failed\n"); + rsrc->recv_seg_bitmap = NULL; + 
goto unreg_recv_seg; + } + + return 0; + +unreg_recv_seg: + ubmad_unregister_seg(rsrc->recv_seg); + rsrc->recv_seg = NULL; +free_send_seg_bitmap: + ubmad_destroy_bitmap(rsrc->send_seg_bitmap); + rsrc->send_seg_bitmap = NULL; +unreg_send_seg: + ubmad_unregister_seg(rsrc->send_seg); + rsrc->send_seg = NULL; + return -1; +} + +static void ubmad_destroy_seg(struct ubmad_jetty_resource *rsrc) +{ + ubmad_destroy_bitmap(rsrc->recv_seg_bitmap); + rsrc->recv_seg_bitmap = NULL; + ubmad_unregister_seg(rsrc->recv_seg); + rsrc->recv_seg = NULL; + + ubmad_destroy_bitmap(rsrc->send_seg_bitmap); + rsrc->send_seg_bitmap = NULL; + ubmad_unregister_seg(rsrc->send_seg); + rsrc->send_seg = NULL; +} + +/* jetty rsrc */ +static int ubmad_init_jetty_rsrc(struct ubmad_jetty_resource *rsrc, + struct ubmad_device_priv *dev_priv) +{ + struct ubcore_device *device = dev_priv->device; + struct ubcore_jetty *jetty; + struct ubcore_jfc *jfc_s; + struct ubcore_jfc *jfc_r; + struct ubcore_jfr *jfr; + uint32_t idx; + int ret; + + /* create jetty */ + jfc_s = ubmad_create_jfc_s(device); + if (IS_ERR_OR_NULL(jfc_s)) { + ubcm_log_err("fail to create jfc_s. dev_name: %s\n", + device->dev_name); + return -1; + } + rsrc->jfc_s = jfc_s; + + jfc_r = ubmad_create_jfc_r(device); + if (IS_ERR_OR_NULL(jfc_r)) { + ubcm_log_err("fail to create jfc_r. dev_name: %s\n", + device->dev_name); + ret = -1; + goto del_jfc_s; + } + rsrc->jfc_r = jfc_r; + + jfr = ubmad_create_jfr(dev_priv, jfc_r); + if (IS_ERR_OR_NULL(jfr)) { + ubcm_log_err("fail to create jfr. dev_name: %s\n", + device->dev_name); + ret = -1; + goto del_jfc_r; + } + rsrc->jfr = jfr; + + jetty = ubmad_create_jetty(dev_priv, jfc_s, jfc_r, jfr, rsrc->jetty_id); + if (IS_ERR_OR_NULL(jetty)) { + ubcm_log_err("fail to create wk jetty. dev_name: %s, id: %u.\n", + device->dev_name, rsrc->jetty_id); + ret = -1; + goto del_jfr; + } + atomic_set(&rsrc->tx_in_queue, 0); + + ubcm_log_info("well-known jetty id %u eid " EID_FMT ", jfr id: %u.\n", + jetty->jetty_id.id, EID_ARGS(jetty->jetty_id.eid), + jfr->jfr_id.id); + rsrc->jetty = jetty; + + /* create seg */ + ret = ubmad_create_seg(rsrc, device); + if (ret != 0) { + ubcm_log_err("create seg failed. device %s.\n", + device->dev_name); + goto del_jetty; + } + + /* first batch of post_recv */ + for (idx = 0; idx < UBMAD_JFR_DEPTH; idx++) { + ret = ubmad_post_recv(rsrc); + if (ret != 0) { + ubcm_log_err( + "No. %u post recv in the first batch failed. 
device %s ret %d\n", + idx, device->dev_name, ret); + goto destroy_seg; + } + } + + /* tjetty */ + for (idx = 0; idx < UBMAD_MAX_TJETTY_NUM; idx++) + INIT_HLIST_HEAD(&rsrc->tjetty_hlist[idx]); + spin_lock_init(&rsrc->tjetty_hlist_lock); + + /* reliable communication */ + ubmad_init_seid_hlist(rsrc); + + return 0; +destroy_seg: + ubmad_destroy_seg(rsrc); +del_jetty: + (void)ubcore_delete_jetty(jetty); +del_jfr: + (void)ubcore_delete_jfr(jfr); +del_jfc_r: + (void)ubcore_delete_jfc(jfc_r); +del_jfc_s: + (void)ubcore_delete_jfc(jfc_s); + return ret; +} + +static void ubmad_uninit_jetty_rsrc(struct ubmad_jetty_resource *rsrc) +{ + struct ubmad_tjetty *tjetty; + struct hlist_node *next; + unsigned long flag; + int i; + + /* reliable communication */ + ubmad_uninit_seid_hlist(rsrc); + + /* tjetty */ + spin_lock_irqsave(&rsrc->tjetty_hlist_lock, flag); + for (i = 0; i < UBMAD_MAX_TJETTY_NUM; i++) { + hlist_for_each_entry_safe(tjetty, next, &rsrc->tjetty_hlist[i], + node) { + hlist_del(&tjetty->node); + ubmad_unimport_jetty(tjetty); + } + } + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); + + ubmad_destroy_seg(rsrc); + (void)ubcore_delete_jetty(rsrc->jetty); + rsrc->jetty = NULL; + + (void)ubcore_delete_jfr(rsrc->jfr); + rsrc->jfr = NULL; + + (void)ubcore_delete_jfc(rsrc->jfc_r); + rsrc->jfc_r = NULL; + + (void)ubcore_delete_jfc(rsrc->jfc_s); + rsrc->jfc_s = NULL; +} + +static int ubmad_init_jetty_rsrc_array(struct ubmad_jetty_resource *rsrc_array, + struct ubmad_device_priv *dev_priv) +{ + int i, j; + int ret; + + for (i = 0; i < UBMAD_WK_JETTY_NUM; i++) { + rsrc_array[i].jetty_id = g_ubmad_wk_jetty_id[i]; + ret = ubmad_init_jetty_rsrc(&rsrc_array[i], dev_priv); + if (ret != 0) { + ubcm_log_err( + "Failed to init jetty rsrc, index: %d, ret: %d.\n", + i, ret); + goto uninit_rsrc; + } + } + ubcm_log_info("Finish to init jetty resource.\n"); + + return 0; +uninit_rsrc: + for (j = 0; j < i; j++) + ubmad_uninit_jetty_rsrc(&rsrc_array[j]); + return ret; +} + +static void +ubmad_uninit_jetty_rsrc_array(struct ubmad_jetty_resource *rsrc_array) +{ + int i; + + for (i = 0; i < UBMAD_WK_JETTY_NUM; i++) + ubmad_uninit_jetty_rsrc(&rsrc_array[i]); +} + +struct ubmad_jetty_resource * +ubmad_get_jetty_rsrc_by_jfc_s(struct ubmad_device_priv *dev_priv, + struct ubcore_jfc *jfc_s) +{ + int i; + + /* No need to lock as dev_priv kref_put, so jetty resources are valid */ + for (i = 0; i < UBMAD_WK_JETTY_NUM; i++) { + if (dev_priv->jetty_rsrc[i].jfc_s == jfc_s) + return &dev_priv->jetty_rsrc[i]; + } + + return NULL; +} + +struct ubmad_jetty_resource * +ubmad_get_jetty_rsrc_by_jfc_r(struct ubmad_device_priv *dev_priv, + struct ubcore_jfc *jfc_r) +{ + int i; + + /* No need to lock as dev_priv kref_put, so jetty resources are valid */ + for (i = 0; i < UBMAD_WK_JETTY_NUM; i++) { + if (dev_priv->jetty_rsrc[i].jfc_r == jfc_r) + return &dev_priv->jetty_rsrc[i]; + } + + return NULL; +} + +/* device */ +static int +ubmad_create_device_priv_resources(struct ubmad_device_priv *dev_priv) +{ + struct ubcore_device *device = dev_priv->device; + struct ubcore_eid_info *eid_list = NULL; + uint32_t cnt = 0; + int ret; + + /* check */ + if (dev_priv->valid) { + ubcm_log_warn("dev_priv rsrc already inited. 
dev_name: %s\n", + device->dev_name); + return 0; + } + + eid_list = ubcore_get_eid_list(device, &cnt); + if (eid_list == NULL || cnt == 0) { + ubcm_log_warn( + "No eid_list in device: %s, do not create wk-jetty resource.\n", + device->dev_name); + return 0; + } + + ret = ubmad_init_jetty_rsrc_array(dev_priv->jetty_rsrc, dev_priv); + if (ret != 0) { + ubcm_log_err("Failed to init jetty rsrc array, ret: %d.\n", + ret); + return ret; + } + + dev_priv->valid = true; + return 0; +} + +static void +ubmad_destroy_device_priv_resources(struct ubmad_device_priv *dev_priv) +{ + if (!dev_priv->valid) { + ubcm_log_warn( + "dev_priv rsrc not inited. No need to uninit. dev_name: %s\n", + dev_priv->device->dev_name); + return; + } + dev_priv->valid = false; + + ubmad_uninit_jetty_rsrc_array(dev_priv->jetty_rsrc); +} + +static struct ubmad_device_priv * +ubmad_get_device_priv_lockless(struct ubcore_device *device) +{ + struct ubmad_device_priv *dev_priv, *next; + + list_for_each_entry_safe(dev_priv, next, &g_ubmad_device_list, node) { + if (dev_priv->device == device) { + kref_get(&dev_priv->kref); + return dev_priv; + } + } + return NULL; +} + +struct ubmad_device_priv *ubmad_get_device_priv(struct ubcore_device *device) +{ + struct ubmad_device_priv *dev_priv; + unsigned long flag; + + spin_lock_irqsave(&g_ubmad_device_list_lock, flag); + dev_priv = ubmad_get_device_priv_lockless(device); + spin_unlock_irqrestore(&g_ubmad_device_list_lock, flag); + + return dev_priv; +} + +static void ubmad_release_device_priv(struct kref *kref) +{ + struct ubmad_device_priv *dev_priv = + container_of(kref, struct ubmad_device_priv, kref); + + /* retransmission */ + flush_workqueue(dev_priv->rt_wq); + destroy_workqueue(dev_priv->rt_wq); + + /* rsrc */ + ubmad_destroy_device_priv_resources(dev_priv); + + /* basic */ + ubcore_unregister_event_handler(dev_priv->device, &dev_priv->handler); + kfree(dev_priv); +} + +void ubmad_put_device_priv(struct ubmad_device_priv *dev_priv) +{ + kref_put(&dev_priv->kref, ubmad_release_device_priv); +} + +// init dev_priv rsrc fail won't cause this func ret err +static int ubmad_open_device(struct ubcore_device *device) +{ + struct ubmad_device_priv *dev_priv; + unsigned long flag; + + /* basic */ + dev_priv = kzalloc(sizeof(struct ubmad_device_priv), GFP_KERNEL); + if (dev_priv == NULL) + return -ENOMEM; + kref_init(&dev_priv->kref); + dev_priv->device = device; + dev_priv->handler.event_callback = ubmad_event_cb; + ubcore_register_event_handler(device, &dev_priv->handler); + + /* rsrc */ + if (ubmad_create_device_priv_resources(dev_priv) != 0) { + // It could be due to eid not added. Wait for ubcore add eid event to init again. + ubcm_log_warn("fail to create dev_priv rsrc. dev_name: %s\n", + device->dev_name); + } + + /* reliable communication */ + dev_priv->rt_wq = create_workqueue("ubmad rt_wq"); + if (IS_ERR_OR_NULL(dev_priv->rt_wq)) { + ubcm_log_err("create rt_wq failed. 
dev_name: %s\n", + device->dev_name); + ubmad_destroy_device_priv_resources(dev_priv); + ubcore_unregister_event_handler(dev_priv->device, + &dev_priv->handler); + kfree(dev_priv); + return -1; + } + + /* add to list */ + spin_lock_irqsave(&g_ubmad_device_list_lock, flag); + list_add_tail(&dev_priv->node, &g_ubmad_device_list); + spin_unlock_irqrestore(&g_ubmad_device_list_lock, flag); + + return 0; +} + +static void ubmad_rsrc_notify_close(struct ubmad_jetty_resource *rsrc) +{ + struct ubmad_tjetty *tjetty; + struct hlist_node *next; + unsigned long flag; + int i; + + if (IS_ERR_OR_NULL(rsrc->jetty) || IS_ERR_OR_NULL(rsrc->send_seg) || + IS_ERR_OR_NULL(rsrc->send_seg_bitmap)) { + ubcm_log_warn("Invalid parameter.\n"); + return; + } + + spin_lock_irqsave(&rsrc->tjetty_hlist_lock, flag); + for (i = 0; i < UBMAD_MAX_TJETTY_NUM; i++) { + hlist_for_each_entry_safe(tjetty, next, &rsrc->tjetty_hlist[i], + node) + ubmad_post_send_close_req(rsrc, tjetty->tjetty); + } + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); +} + +// send close request to all tjetty before remove kmod +static void ubmad_notify_close(struct ubcore_device *device) +{ + struct ubmad_device_priv *dev_priv; + unsigned long flag; + int i; + + spin_lock_irqsave(&g_ubmad_device_list_lock, flag); + dev_priv = ubmad_get_device_priv_lockless(device); + spin_unlock_irqrestore(&g_ubmad_device_list_lock, flag); + if (dev_priv == NULL) { + ubcm_log_err("Failed to get dev_priv, dev_name: %s\n", + device->dev_name); + return; + } + + for (i = 0; i < UBMAD_WK_JETTY_NUM; i++) + ubmad_rsrc_notify_close(&dev_priv->jetty_rsrc[i]); + + ubmad_put_device_priv(dev_priv); +} + +static int ubmad_close_device(struct ubcore_device *device) +{ + struct ubmad_device_priv *dev_priv; + unsigned long flag; + + /* remove from list */ + spin_lock_irqsave(&g_ubmad_device_list_lock, flag); + dev_priv = ubmad_get_device_priv_lockless(device); + if (dev_priv == NULL) { + spin_unlock_irqrestore(&g_ubmad_device_list_lock, flag); + ubcm_log_err("Failed to get dev_priv, dev_name: %s\n", + device->dev_name); + return -ENODEV; + } + list_del(&dev_priv->node); + spin_unlock_irqrestore(&g_ubmad_device_list_lock, flag); + ubmad_put_device_priv(dev_priv); // put for get above + + /* left triggered by put */ + ubmad_put_device_priv( + dev_priv); // put for kref_init() in ubmad_open_device() + return 0; +} + +static int ubmad_add_device(struct ubcore_device *device) +{ + /* Use main device, do not use namespace logic device */ + int ret; + + /* open dev */ + ret = ubmad_open_device(device); + if (ret != 0) { + ubcm_log_err( + "fail to open mad device, dev_name: %s, ret: %d.\n", + device->dev_name, ret); + return ret; + } + + return 0; +} + +static void ubmad_remove_device(struct ubcore_device *device, void *client_ctx) +{ + int ret; + + ubmad_notify_close(device); + + ret = ubmad_close_device(device); + if (ret != 0) + ubcm_log_err("Failed to close ubmad device, dev_name: %s.\n", + device->dev_name); +} + +static struct ubcore_client g_ubmad_client = { + .list_node = LIST_HEAD_INIT(g_ubmad_client.list_node), + .client_name = "ubmad", + .add = ubmad_add_device, + .remove = ubmad_remove_device +}; + +int ubmad_init(void) +{ + int ret; + + INIT_LIST_HEAD(&g_ubmad_device_list); + INIT_LIST_HEAD(&g_ubmad_agent_list); + + ret = ubcore_register_client(&g_ubmad_client); + if (ret != 0) { + ubcm_log_err("Failed to register ub_mad client, ret: %d.\n", + ret); + return ret; + } + + ubcore_register_cm_eid_ops(ubmad_ubc_eid_ops); + + return 0; +} + +void ubmad_uninit(void) +{ + 
ubcore_unregister_client(&g_ubmad_client); +} + +/* agent ops */ +static struct ubmad_agent_priv * +ubmad_get_agent_priv_lockless(struct ubcore_device *device) +{ + struct ubmad_agent_priv *agent_priv, *next; + + list_for_each_entry_safe(agent_priv, next, &g_ubmad_agent_list, node) { + if (agent_priv->agent.device == device) { + kref_get(&agent_priv->kref); + return agent_priv; + } + } + + return NULL; +} + +struct ubmad_agent_priv *ubmad_get_agent_priv(struct ubcore_device *device) +{ + struct ubmad_agent_priv *agent_priv; + unsigned long flag; + + spin_lock_irqsave(&g_ubmad_agent_list_lock, flag); + agent_priv = ubmad_get_agent_priv_lockless(device); + spin_unlock_irqrestore(&g_ubmad_agent_list_lock, flag); + + return agent_priv; +} + +static void ubmad_release_agent_priv(struct kref *kref) +{ + struct ubmad_agent_priv *agent_priv = + container_of(kref, struct ubmad_agent_priv, kref); + + flush_workqueue(agent_priv->jfce_wq); + destroy_workqueue(agent_priv->jfce_wq); + + kfree(agent_priv); +} + +void ubmad_put_agent_priv(struct ubmad_agent_priv *agent_priv) +{ + kref_put(&agent_priv->kref, ubmad_release_agent_priv); +} + +struct ubmad_agent *ubmad_register_agent(struct ubcore_device *device, + ubmad_send_handler send_handler, + ubmad_recv_handler recv_handler, + void *usr_ctx) +{ + struct ubmad_agent *agent; + struct ubmad_agent_priv *agent_priv; + unsigned long flag; + + /* check inputs */ + if (IS_ERR_OR_NULL(device)) { + ubcm_log_err("device nullptr\n"); + return ERR_PTR(-EINVAL); + } + if (IS_ERR_OR_NULL(send_handler)) + ubcm_log_warn("send_handler null\n"); + if (IS_ERR_OR_NULL(recv_handler)) + ubcm_log_warn("recv_handler null\n"); + + /* create agent_priv */ + agent_priv = kzalloc(sizeof(struct ubmad_agent_priv), GFP_KERNEL); + if (IS_ERR_OR_NULL(agent_priv)) + return ERR_PTR(-ENOMEM); + kref_init(&agent_priv->kref); + + agent_priv->jfce_wq = create_workqueue("ubmad jfce_wq"); + if (IS_ERR_OR_NULL(agent_priv->jfce_wq)) { + ubcm_log_err("create agent_priv workqueue failed.\n"); + kfree(agent_priv); + return NULL; + } + + /* init agent */ + agent = &agent_priv->agent; + agent->device = device; + agent->send_handler = send_handler; + agent->recv_handler = recv_handler; + agent->usr_ctx = usr_ctx; + + /* add to list */ + INIT_LIST_HEAD(&agent_priv->node); + spin_lock_irqsave(&g_ubmad_agent_list_lock, flag); + list_add_tail(&agent_priv->node, &g_ubmad_agent_list); + spin_unlock_irqrestore(&g_ubmad_agent_list_lock, flag); + + return agent; +} + +int ubmad_unregister_agent(struct ubmad_agent *agent) +{ + unsigned long flag; + struct ubmad_agent_priv *agent_priv; + + if (IS_ERR_OR_NULL(agent)) { + ubcm_log_err("agent nullptr\n"); + return -EINVAL; + } + + /* remove from list */ + agent_priv = container_of(agent, struct ubmad_agent_priv, agent); + spin_lock_irqsave(&g_ubmad_agent_list_lock, flag); + list_del(&agent_priv->node); + spin_unlock_irqrestore(&g_ubmad_agent_list_lock, flag); + + ubmad_put_agent_priv(agent_priv); // put for kref_init() + + return 0; +} diff --git a/drivers/ub/urma/ubcore/ubcm/ub_mad.h b/drivers/ub/urma/ubcore/ubcm/ub_mad.h new file mode 100644 index 000000000000..0d20908e26b2 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ub_mad.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ub_mad header, only including Northbound API + * Author: Chen Yutao + * Create: 2025-01-10 + * Note: + * History: 2025-01-10: Create file + */ + +#ifndef UB_MAD_H +#define UB_MAD_H + +#include +#include "net/ubcore_cm.h" + +/* agent */ +#define UBMAD_SGE_MAX_LEN 2048 // cm data max len + +enum ubmad_msg_type { + UBMAD_CONN_DATA = 0, + UBMAD_CONN_ACK, + UBMAD_UBC_CONN_DATA = UBCORE_CM_CONN_MSG, + UBMAD_UBC_CONN_ACK, + UBMAD_AUTHN_DATA = 0x10, + UBMAD_AUTHN_ACK, + // cm send close request to all tjetty before remove kmod, one-way notification + UBMAD_CLOSE_REQ = 0x20, +}; + +struct ubmad_send_buf { + union ubcore_eid src_eid; + union ubcore_eid dst_eid; + + enum ubmad_msg_type msg_type; + uint32_t payload_len; + uint8_t payload[]; +}; + +/* callbacks for cm in ubmad_jfce_handler */ +struct ubmad_send_cr { + struct ubcore_cr *cr; +}; + +struct ubmad_recv_cr { + struct ubcore_cr *cr; + + // remote eid see cr->remote_id.eid + union ubcore_eid local_eid; + + enum ubmad_msg_type msg_type; + uint64_t payload; + uint32_t payload_len; // != cr->completion_len, latter including msg header size +}; + +struct ubmad_agent; +typedef int (*ubmad_send_handler)(struct ubmad_agent *agent, + struct ubmad_send_cr *cr); +typedef int (*ubmad_recv_handler)(struct ubmad_agent *agent, + struct ubmad_recv_cr *cr); +struct ubmad_agent { + struct ubcore_device *device; + ubmad_send_handler send_handler; + ubmad_recv_handler recv_handler; + void *usr_ctx; +}; + +int ubmad_init(void); +void ubmad_uninit(void); + +struct ubmad_agent *ubmad_register_agent(struct ubcore_device *device, + ubmad_send_handler send_handler, + ubmad_recv_handler recv_handler, + void *usr_ctx); +int ubmad_unregister_agent(struct ubmad_agent *agent); + +int ubmad_post_send(struct ubcore_device *device, + struct ubmad_send_buf *send_buf, + struct ubmad_send_buf **bad_send_buf); +int ubmad_ubc_send(struct ubcore_device *device, + struct ubcore_cm_send_buf *send_buf); + +#endif /* UB_MAD_H */ diff --git a/drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h b/drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h new file mode 100644 index 000000000000..d00e9e4744a3 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ub_mad private header + * Author: Chen Yutao + * Create: 2025-01-13 + * Note: + * History: 2025-01-13: Create file + */ + +#ifndef UB_MAD_PRIV_H +#define UB_MAD_PRIV_H + +#include + +#include "ub_mad.h" + +/* well-known jetty (wk jetty) parameters */ +#define UBMAD_WK_JETTY_NUM 2 /* well-known jetty 0 and 1 only used in ubcm */ +#define UBMAD_WK_JETTY_ID_0 1U +#define UBMAD_WK_JETTY_ID_1 2U + +// jetty +#define UBMAD_JFS_DEPTH 512U +#define UBMAD_JFR_DEPTH 1024U +#define UBMAD_JFS_MAX_SGE_NUM 1 +#define UBMAD_JFR_MAX_SGE_NUM 1 +#define UBMAD_JETTY_ERR_TIMEOUT 17 + +// seg +#define UBMAD_SEND_SGE_NUM (UBMAD_JFS_DEPTH * 2) +#define UBMAD_RECV_SGE_NUM (UBMAD_JFR_DEPTH * 2) + +// tjetty +#define UBMAD_MAX_TJETTY_NUM 10240 + +/* datapath */ +#define UBMAD_MSG_VERSION_0 0 /* current version */ + +// reliable communication +#define UBMAD_MSN_HLIST_SIZE 1024 +#define UBMAD_MAX_SEID_NUM 1024 + +#define UBMAD_RETRANSMIT_MS 500 +#define UBMAD_RETRANSMIT_PERIOD msecs_to_jiffies(UBMAD_RETRANSMIT_MS) + +#define UBMAD_MAX_RETRY_CNT 4 +#define UBMAD_RX_BITMAP_SIZE 1024 + +#define UBMAD_TX_THREDSHOLD (UBMAD_JFS_DEPTH - 8) + +/* common */ +struct ubmad_bitmap { + unsigned long *bits; + uint32_t size; + uint64_t right_end; /* Only for RX side */ + spinlock_t lock; +}; + +struct ubmad_msn_mgr { + atomic64_t msn_gen; // msn generator, increased with each post_send + + // msn_hlist holds msn that posted but not ack yet. key: msn, val: msn_node + struct hlist_head msn_hlist[UBMAD_MSN_HLIST_SIZE]; // ubmad_msn_node + spinlock_t msn_hlist_lock; +}; + +/* jetty */ +struct ubmad_jetty_resource { + /* jetty */ + uint32_t jetty_id; + struct ubcore_jfc *jfc_s; // send + struct ubcore_jfc *jfc_r; // recv + struct ubcore_jfr *jfr; + struct ubcore_jetty *jetty; /* well-known jetty */ + atomic_t tx_in_queue; + + /* seg */ + // each post uses one sge in the seg + // send + struct ubcore_target_seg *send_seg; + struct ubmad_bitmap *send_seg_bitmap; + // recv + struct ubcore_target_seg *recv_seg; + struct ubmad_bitmap *recv_seg_bitmap; + + /* tjetty */ + // key: ubcore_eid dst_eid, val: ubmad_tjetty tjetty + struct hlist_head tjetty_hlist[UBMAD_MAX_TJETTY_NUM]; + spinlock_t tjetty_hlist_lock; + + /* reliable communication */ + // source eid hlist, only for target. key: src eid, val: seid_node. + struct hlist_head seid_hlist[UBMAD_MAX_SEID_NUM]; // ubmad_seid_node + spinlock_t seid_hlist_lock; +}; + +struct ubmad_tjetty { + struct ubcore_tjetty *tjetty; + struct kref kref; + struct hlist_node node; // ubmad_device_priv.tjetty_hlist + + /* reliable communication */ + struct ubmad_msn_mgr msn_mgr; // for retransmit, only for initiator +}; + +/* device */ +// device contains resources used inside ubmad, including jetty, seg and etc. +struct ubmad_device_priv { + struct ubcore_device *device; + struct kref kref; + struct list_head node; // g_ubmad_device_list + struct ubcore_event_handler handler; + + /** resources **/ + bool valid; // following resources inited or not + struct ubmad_jetty_resource + jetty_rsrc[UBMAD_WK_JETTY_NUM]; // well-known jetty resource + /** end of resources **/ + + /* reliable communication */ + struct workqueue_struct + *rt_wq; // retransmit work queue, only for initiator + struct ubcore_eid_info eid_info; + bool has_create_jetty_rsrc; +}; + +/* agent */ +// agent contains resources used between ubmad and ubcm. 
+struct ubmad_agent_priv { + struct ubmad_agent agent; + struct kref kref; + struct list_head node; // g_ubmad_agent_list + struct workqueue_struct *jfce_wq; // ubmad_jfce_work +}; + +/** datapath **/ +/* msg */ +/* + * 1. considering 8B alignment, layout is not logical. + * 2. msg is stored in sge rather than alloc. + */ +struct ubmad_msg { + uint8_t version; + uint8_t msg_type; // ubmad_msg_type + uint16_t payload_len; + uint32_t reserved; // reserved for 8B aligned + + uint64_t msn; // Message sequence number + + uint8_t payload[]; +}; + +/* poll */ +/* + * msg not processed right after poll jfc in ubmad_jfce_handler(), but tranformed to ubmad_jfce_work + * and left for workqueue. + */ +enum ubmad_jfce_work_type { UBMAD_SEND_WORK, UBMAD_RECV_WORK }; + +struct ubmad_jfce_work { + struct work_struct work; // ubmad_agent_priv.jfce_wq + enum ubmad_jfce_work_type type; + + struct ubcore_jfc *jfc; + struct ubmad_agent_priv *agent_priv; +}; + +/** reliable communication **/ +/* for initiator (data sender, ack recver) */ +// add msn_node to msn_hlist when post and remove when recv ack +struct ubmad_msn_node { + struct hlist_node node; // ubmad_msn_mgr.msn_hlist + uint64_t msn; +}; + +/* try to find msn_node in msn_hlist when timeout. If find, repost and re-add work, + * else indicating already ack, free work. + */ +struct ubmad_rt_work { + struct delayed_work delay_work; // ubmad_device_priv.rt_wq + + uint64_t msn; + uint32_t rt_cnt; /* Retry count, no larger than UBMAD_MAX_RETRY_CNT */ + struct ubmad_msn_mgr *msn_mgr; + + struct ubmad_msg *msg; + struct ubmad_tjetty *tjetty; + struct ubmad_jetty_resource *rsrc; + struct workqueue_struct *rt_wq; +}; + +/* for target (data recver, ack sender) */ +struct ubmad_seid_node { + struct hlist_node node; // ubmad_device_priv.seid_hlist + union ubcore_eid seid; + struct kref kref; + + atomic64_t expected_msn; + struct ubmad_bitmap *rx_bitmap; +}; + +/* common */ +struct ubmad_bitmap *ubmad_create_bitmap(uint32_t bitmap_size); +void ubmad_destroy_bitmap(struct ubmad_bitmap *bitmap); +uint32_t ubmad_bitmap_get_id(struct ubmad_bitmap *bitmap); +int ubmad_bitmap_put_id(struct ubmad_bitmap *bitmap, uint32_t id); +bool ubmad_bitmap_test_id(struct ubmad_bitmap *bitmap, uint32_t id); +int ubmad_bitmap_set_id(struct ubmad_bitmap *bitmap, uint32_t id); + +/* jetty */ +struct ubmad_tjetty *ubmad_get_tjetty(union ubcore_eid *dst_eid, + struct ubmad_jetty_resource *rsrc); +void ubmad_put_tjetty(struct ubmad_tjetty *tjetty); + +struct ubmad_tjetty *ubmad_import_jetty(struct ubcore_device *device, + struct ubmad_jetty_resource *rsrc, + union ubcore_eid *dst_eid); + +struct ubmad_jetty_resource * +ubmad_get_jetty_rsrc_by_jfc_s(struct ubmad_device_priv *dev_priv, + struct ubcore_jfc *jfc_s); +struct ubmad_jetty_resource * +ubmad_get_jetty_rsrc_by_jfc_r(struct ubmad_device_priv *dev_priv, + struct ubcore_jfc *jfc_r); + +void ubmad_remove_tjetty(union ubcore_eid *seid, + struct ubmad_jetty_resource *rsrc); + +/* device */ +struct ubmad_device_priv *ubmad_get_device_priv(struct ubcore_device *device); +void ubmad_put_device_priv(struct ubmad_device_priv *dev_priv); + +/* agent */ +struct ubmad_agent_priv *ubmad_get_agent_priv(struct ubcore_device *device); +void ubmad_put_agent_priv(struct ubmad_agent_priv *agent_priv); + +/** datapath **/ +/* reliable communication */ +void ubmad_init_msn_mgr(struct ubmad_msn_mgr *msn_mgr); +void ubmad_uninit_msn_mgr(struct ubmad_msn_mgr *msn_mgr); + +void ubmad_init_seid_hlist(struct ubmad_jetty_resource *rsrc); +void 
ubmad_uninit_seid_hlist(struct ubmad_jetty_resource *rsrc); + +/* post */ +int ubmad_repost_send(struct ubmad_msg *msg, struct ubmad_tjetty *tjetty, + struct ubcore_target_seg *send_seg, + struct workqueue_struct *rt_wq, + struct ubmad_jetty_resource *rsrc); +void ubmad_post_send_close_req(struct ubmad_jetty_resource *rsrc, + struct ubcore_tjetty *tjetty); + +int ubmad_post_recv(struct ubmad_jetty_resource *rsrc); + +/* poll */ +void ubmad_jfce_handler_s(struct ubcore_jfc *jfc); +void ubmad_jfce_handler_r(struct ubcore_jfc *jfc); + +#endif /* UB_MAD_PRIV_H */ diff --git a/drivers/ub/urma/ubcore/ubcm/ubcm_genl.c b/drivers/ub/urma/ubcore/ubcm/ubcm_genl.c new file mode 100644 index 000000000000..609186342937 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ubcm_genl.c @@ -0,0 +1,900 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ub_cm generic netlink implementation + * Author: Chen Yutao + * Create: 2025-01-10 + * Note: + * History: 2025-01-10: create file + */ + +#include +#include +#include +#include + +#include + +#include "ub_mad.h" +#include "ub_cm.h" +#include "ubcm_log.h" +#include "ubcm_genl.h" + +struct ubcm_uvs_list { + spinlock_t lock; /* for both uvs list and eid_hlist of uvs_node */ + struct list_head list; /* uvs genl nodes list */ + int count; /* number of uvs genl nodes in list */ + uint32_t next_id; /* next id for uvs */ +}; + +static struct ubcm_uvs_list g_ubcm_uvs_list = { 0 }; +static inline struct ubcm_uvs_list *get_uvs_list(void) +{ + return &g_ubcm_uvs_list; +} +atomic_t g_ubcm_nlmsg_seq; + +static int ubcm_genl_uvs_add_handler(struct sk_buff *skb, + struct genl_info *info); +static int ubcm_genl_uvs_remove_handler(struct sk_buff *skb, + struct genl_info *info); +static int ubcm_genl_uvs_add_eid_handler(struct sk_buff *skb, + struct genl_info *info); +static int ubcm_genl_uvs_del_eid_handler(struct sk_buff *skb, + struct genl_info *info); +static int ubcm_genl_uvs_msg_handler(struct sk_buff *skb, + struct genl_info *info); +static int ubcm_genl_uvs_authn_handler(struct sk_buff *skb, + struct genl_info *info); + +static int ubcm_nl_notifier_call(struct notifier_block *nb, + unsigned long action, void *data); + +static const struct nla_policy g_ubcm_policy[NUM_UBCM_ATTR] = { + [UBCM_ATTR_UNSPEC] = { 0 }, + [UBCM_HDR_COMMAND] = { .type = NLA_U32 }, + [UBCM_HDR_ARGS_LEN] = { .type = NLA_U32 }, + [UBCM_HDR_ARGS_ADDR] = { .type = NLA_U64 }, + [UBCM_ATTR_NS_MODE] = { .type = NLA_U8 }, + [UBCM_ATTR_DEV_NAME] = { .type = NLA_STRING, + .len = UBCORE_MAX_DEV_NAME - 1 }, + [UBCM_ATTR_NS_FD] = { .type = NLA_U32 }, + [UBCM_MSG_SEQ] = { .type = NLA_U32 }, + [UBCM_MSG_TYPE] = { .type = NLA_U32 }, + [UBCM_SRC_ID] = { .len = UBCORE_EID_SIZE }, + [UBCM_DST_ID] = { .len = UBCORE_EID_SIZE }, + [UBCM_RESERVED] = { .type = NLA_U32 }, + [UBCM_PAYLOAD_DATA] = { .type = NLA_BINARY } +}; + +static const struct genl_ops g_ubcm_genl_ops[] = { + { .cmd = UBCM_CMD_UVS_ADD, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .doit = 
ubcm_genl_uvs_add_handler }, + { .cmd = UBCM_CMD_UVS_REMOVE, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .doit = ubcm_genl_uvs_remove_handler }, + { .cmd = UBCM_CMD_UVS_ADD_EID, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .doit = ubcm_genl_uvs_add_eid_handler }, + { .cmd = UBCM_CMD_UVS_DEL_EID, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .doit = ubcm_genl_uvs_del_eid_handler }, + { .cmd = UBCM_CMD_UVS_MSG, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .doit = ubcm_genl_uvs_msg_handler }, + { .cmd = UBCM_CMD_UVS_AUTHN, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .doit = ubcm_genl_uvs_authn_handler } +}; + +struct genl_family g_ubcm_genl_family __ro_after_init = { + .hdrsize = 0, + .name = UBCM_GENL_FAMILY_NAME, + .version = UBCM_GENL_FAMILY_VERSION, + .maxattr = UBCM_ATTR_MAX, + .policy = g_ubcm_policy, + + .resv_start_op = UBCM_CMD_NUM, + + .netnsok = true, + .module = THIS_MODULE, + .ops = g_ubcm_genl_ops, + .n_ops = ARRAY_SIZE(g_ubcm_genl_ops) +}; + +static struct notifier_block g_ubcm_nl_notifier = { + .notifier_call = ubcm_nl_notifier_call, +}; + +static int ubcm_check_uvs_para(struct genl_info *info, uint32_t *length) +{ + uint32_t payload_len; + + if (!info->attrs[UBCM_PAYLOAD_DATA]) { + ubcm_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + payload_len = (uint32_t)nla_len(info->attrs[UBCM_PAYLOAD_DATA]); + if (payload_len == 0 || payload_len > UBCM_MAX_UVS_NAME_LEN) { + ubcm_log_err("Invalid payload length: %u.\n", payload_len); + return -EINVAL; + } + + *length = payload_len; + + return 0; +} + +static int ubcm_copy_uvs_name(struct genl_info *info, char *uvs_name, + uint32_t payload_len) +{ + (void)memcpy(uvs_name, nla_data(info->attrs[UBCM_PAYLOAD_DATA]), + payload_len); + uvs_name[UBCM_MAX_UVS_NAME_LEN - 1] = '\0'; + + return 0; +} + +static struct ubcm_uvs_genl_node * +ubcm_lookup_genl_node_lockless(const char *uvs_name, + struct ubcm_uvs_list *uvs_list) +{ + struct ubcm_uvs_genl_node *node, *next; + + list_for_each_entry_safe(node, next, &uvs_list->list, list_node) { + if (strcmp(node->name, uvs_name) == 0) + return node; + } + return NULL; +} + +static int ubcm_genl_uvs_add(const char *uvs_name, uint32_t genl_port, + struct sock *genl_sock) +{ + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_genl_node *new = NULL; + struct ubcm_uvs_genl_node *node; + int idx; + + new = kzalloc(sizeof(struct ubcm_uvs_genl_node), GFP_ATOMIC); + if (new == NULL) + return -ENOMEM; + + spin_lock(&uvs_list->lock); + node = ubcm_lookup_genl_node_lockless(uvs_name, uvs_list); + if (node != NULL) { + spin_unlock(&uvs_list->lock); + ubcm_log_warn("Uvs: %s already exist.\n", uvs_name); + kfree(new); + return -EEXIST; + } + + (void)strscpy(new->name, uvs_name, UBCM_MAX_UVS_NAME_LEN); + kref_init(&new->ref); + new->pid = (uint32_t)task_tgid_vnr(current); + new->id = uvs_list->next_id; + new->state = UBCM_UVS_STATE_ALIVE; + atomic_set(&new->map2ue, 0); + new->genl_port = genl_port; + new->genl_sock = genl_sock; + for (idx = 0; idx < UBCM_EID_TABLE_SIZE; idx++) + INIT_HLIST_HEAD(&new->eid_hlist[idx]); + + list_add_tail(&new->list_node, &uvs_list->list); + uvs_list->count++; + uvs_list->next_id++; + spin_unlock(&uvs_list->lock); + + ubcm_log_info("Finish to add uvs node: %s, id: %u.\n", uvs_name, + new->id); + return 0; +} + +static int ubcm_genl_uvs_add_handler(struct sk_buff *skb, + struct genl_info *info) +{ + char uvs_name[UBCM_MAX_UVS_NAME_LEN] 
= { 0 }; + uint32_t payload_len; + int ret; + + ret = ubcm_check_uvs_para(info, &payload_len); + if (ret != 0) { + ubcm_log_err("Invalid add parameter.\n"); + return ret; + } + + ret = ubcm_copy_uvs_name(info, uvs_name, payload_len); + if (ret != 0) + return ret; + + ret = ubcm_genl_uvs_add(uvs_name, info->snd_portid, + genl_info_net(info)->genl_sock); + if (ret != 0) { + ubcm_log_err("Failed to add uvs genl node: %s.\n", uvs_name); + return ret; + } + + return 0; +} + +void ubcm_uvs_kref_get(struct ubcm_uvs_genl_node *node) +{ + kref_get(&node->ref); +} + +static void ubcm_uvs_kref_release(struct kref *ref) +{ + struct ubcm_uvs_genl_node *node = + container_of(ref, struct ubcm_uvs_genl_node, ref); + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_eid_node *eid_node; + struct hlist_node *next; + int i; + + spin_lock(&uvs_list->lock); + for (i = 0; i < UBCM_EID_TABLE_SIZE; i++) { + hlist_for_each_entry_safe(eid_node, next, &node->eid_hlist[i], + node) { + hlist_del(&eid_node->node); + kfree(eid_node); + } + } + spin_unlock(&uvs_list->lock); + + ubcm_log_info("Release uvs: %s, uvs_id: %u.\n", node->name, node->id); + kfree(node); +} + +void ubcm_uvs_kref_put(struct ubcm_uvs_genl_node *node) +{ + uint32_t refcnt; + + refcnt = kref_read(&node->ref); + ubcm_log_info("kref_put: uvs %s, id %u, old refcnt %u, new refcnt %u\n", + node->name, node->id, refcnt, + refcnt > 0 ? refcnt - 1 : 0); + + (void)kref_put(&node->ref, ubcm_uvs_kref_release); +} + +static int ubcm_genl_uvs_remove(const char *uvs_name) +{ + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_genl_node *node; + + spin_lock(&uvs_list->lock); + node = ubcm_lookup_genl_node_lockless(uvs_name, uvs_list); + if (node == NULL) { + spin_unlock(&uvs_list->lock); + ubcm_log_err("Failed to lookup uvs node: %s.\n", uvs_name); + return -ENOENT; + } + + if (node->state == UBCM_UVS_STATE_DEAD) { + spin_unlock(&uvs_list->lock); + ubcm_log_warn("Uvs: %s already set dead.\n", uvs_name); + return -EPERM; + } + + if (atomic_read(&node->map2ue) != 0) { + node->state = UBCM_UVS_STATE_DEAD; + spin_unlock(&uvs_list->lock); + ubcm_log_info( + "Uvs %s was referenced by ue, set dead and keep it.\n", + uvs_name); + return 0; + } + + list_del(&node->list_node); + node->state = UBCM_UVS_STATE_DEAD; + uvs_list->count--; + spin_unlock(&uvs_list->lock); + ubcm_uvs_kref_put(node); + + ubcm_log_info("Uvs: %s removed.\n", uvs_name); + return 0; +} + +static int ubcm_genl_uvs_remove_handler(struct sk_buff *skb, + struct genl_info *info) +{ + char uvs_name[UBCM_MAX_UVS_NAME_LEN]; + uint32_t payload_len; + int ret; + + ret = ubcm_check_uvs_para(info, &payload_len); + if (ret != 0) { + ubcm_log_err("Invalid remove parameter.\n"); + return ret; + } + + ret = ubcm_copy_uvs_name(info, uvs_name, payload_len); + if (ret != 0) + return ret; + + ret = ubcm_genl_uvs_remove(uvs_name); + if (ret != 0) { + ubcm_log_err("Failed to remove uvs genl node: %s.\n", uvs_name); + return ret; + } + + return 0; +} + +static int ubcm_parse_uvs_eid_para(struct genl_info *info, + struct ubcm_nlmsg_op_eid *para, + enum ubcm_genl_msg_type type) +{ + uint32_t payload_len; + uint32_t msg_type; + + if (!info->attrs[UBCM_PAYLOAD_DATA]) { + ubcm_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + payload_len = (uint32_t)nla_len(info->attrs[UBCM_PAYLOAD_DATA]); + if (payload_len != sizeof(struct ubcm_nlmsg_op_eid)) { + ubcm_log_err("Invalid payload length: %u.\n", payload_len); + return -EINVAL; + } + + msg_type = nla_get_u32(info->attrs[UBCM_MSG_TYPE]); + if 
(msg_type != (uint32_t)type) { + ubcm_log_err("Invalid msg_type: %u, type: %u.\n", msg_type, + (uint32_t)type); + return -EINVAL; + } + + (void)memcpy(para, nla_data(info->attrs[UBCM_PAYLOAD_DATA]), + payload_len); + para->uvs_name[UBCM_MAX_UVS_NAME_LEN - 1] = '\0'; + return 0; +} + +static struct ubcm_uvs_eid_node * +ubcm_find_eid_node_lockless(struct ubcm_uvs_genl_node *uvs, uint32_t hash, + union ubcore_eid *eid) +{ + /* No need to check hash as it is no larger than UBCM_EID_TABLE_SIZE */ + struct ubcm_uvs_eid_node *eid_node; + struct hlist_node *next; + + hlist_for_each_entry_safe(eid_node, next, &uvs->eid_hlist[hash], node) { + if (memcmp(&eid_node->eid, eid, sizeof(union ubcore_eid)) == 0) + return eid_node; + } + + ubcm_log_info("Failed to lookup eid node: " EID_FMT ", hash: %u.\n", + EID_ARGS(*eid), hash); + return NULL; +} + +static int ubcm_add_uvs_eid(struct ubcm_nlmsg_op_eid *para) +{ + uint32_t hash = jhash(&para->eid, sizeof(union ubcore_eid), 0) % + UBCM_EID_TABLE_SIZE; + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_eid_node *node, *new; + struct ubcm_uvs_genl_node *uvs; + + /* Step 1: Lookup eid node to judge whether to create new node */ + spin_lock(&uvs_list->lock); + uvs = ubcm_lookup_genl_node_lockless(para->uvs_name, uvs_list); + if (uvs == NULL) { + spin_unlock(&uvs_list->lock); + ubcm_log_err("Failed to find uvs: %s.\n", para->uvs_name); + return -EINVAL; + } + + if (uvs->eid_cnt >= UBCM_EID_TABLE_SIZE) { + spin_unlock(&uvs_list->lock); + ubcm_log_err("Invalid operation, eid_cnt: %u.\n", uvs->eid_cnt); + return -EINVAL; + } + + node = ubcm_find_eid_node_lockless(uvs, hash, &para->eid); + if (node != NULL) { + spin_unlock(&uvs_list->lock); + ubcm_log_warn("Eid: " EID_FMT " already added in uvs: %s.\n", + EID_ARGS(para->eid), para->uvs_name); + return -1; + } + spin_unlock(&uvs_list->lock); + + /* Step 2: Create new eid node */ + new = kzalloc(sizeof(struct ubcm_uvs_eid_node), GFP_KERNEL); + if (new == NULL) + return -ENOMEM; + new->eid_idx = para->eid_idx; + new->eid = para->eid; + INIT_HLIST_NODE(&new->node); + + /* Step 3: Lookup eid node to judge whether to add the new node into hlist */ + spin_lock(&uvs_list->lock); + node = ubcm_find_eid_node_lockless(uvs, hash, &para->eid); + if (node != NULL) { + spin_unlock(&uvs_list->lock); + ubcm_log_warn("Eid: " EID_FMT " already added in uvs: %s.\n", + EID_ARGS(para->eid), para->uvs_name); + kfree(new); + return -1; + } + hlist_add_head(&new->node, &uvs->eid_hlist[hash]); + uvs->eid_cnt++; + spin_unlock(&uvs_list->lock); + ubcm_log_info("Finish to add uvs eid: " EID_FMT ", uvs_name: %s.\n", + EID_ARGS(para->eid), para->uvs_name); + + return 0; +} + +static int ubcm_genl_uvs_add_eid_handler(struct sk_buff *skb, + struct genl_info *info) +{ + struct ubcm_nlmsg_op_eid para = { 0 }; + int ret; + + ret = ubcm_parse_uvs_eid_para(info, &para, UBCM_CMD_UVS_ADD_EID); + if (ret != 0) + return ret; + + return ubcm_add_uvs_eid(&para); +} + +static int ubcm_find_del_eid_node_lockless(struct ubcm_uvs_genl_node *uvs, + union ubcore_eid *eid) +{ + uint32_t hash = + jhash(eid, sizeof(union ubcore_eid), 0) % UBCM_EID_TABLE_SIZE; + struct ubcm_uvs_eid_node *eid_node; + struct hlist_node *next; + + hlist_for_each_entry_safe(eid_node, next, &uvs->eid_hlist[hash], node) { + if (memcmp(&eid_node->eid, eid, sizeof(union ubcore_eid)) == + 0) { + hlist_del(&eid_node->node); + kfree(eid_node); + uvs->eid_cnt--; + return 0; + } + } + + ubcm_log_err("Failed to lookup eid node: " EID_FMT ", hash: %u.\n", + EID_ARGS(*eid), hash); + return -1; +} + +static
int ubcm_del_uvs_eid(struct ubcm_nlmsg_op_eid *para) +{ + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_genl_node *uvs; + int ret; + + spin_lock(&uvs_list->lock); + uvs = ubcm_lookup_genl_node_lockless(para->uvs_name, uvs_list); + if (uvs == NULL) { + spin_unlock(&uvs_list->lock); + ubcm_log_err("Failed to find uvs: %s.\n", para->uvs_name); + return -EINVAL; + } + if (uvs->eid_cnt == 0) { + spin_unlock(&uvs_list->lock); + ubcm_log_err("Invalid operation, there is no valid eid.\n"); + return -EINVAL; + } + + ret = ubcm_find_del_eid_node_lockless(uvs, &para->eid); + spin_unlock(&uvs_list->lock); + + if (ret != 0) { + ubcm_log_err("Failed to delete uvs eid: " EID_FMT + ", uvs_name: %s.\n", + EID_ARGS(para->eid), para->uvs_name); + } else { + ubcm_log_info("Finish to delete uvs eid: " EID_FMT + ", uvs_name: %s.\n", + EID_ARGS(para->eid), para->uvs_name); + } + return ret; +} + +static int ubcm_genl_uvs_del_eid_handler(struct sk_buff *skb, + struct genl_info *info) +{ + struct ubcm_nlmsg_op_eid para = { 0 }; + int ret; + + ret = ubcm_parse_uvs_eid_para(info, &para, UBCM_CMD_UVS_DEL_EID); + if (ret != 0) + return ret; + + return ubcm_del_uvs_eid(&para); +} + +static struct ubmad_send_buf *ubcm_get_nlmsg_send_buf(struct genl_info *info) +{ + struct ubmad_send_buf *send_buf; + uint32_t payload_len; + + if (!info->attrs[UBCM_PAYLOAD_DATA]) { + ubcm_log_err("Invalid parameter.\n"); + return NULL; + } + + payload_len = (uint32_t)nla_len(info->attrs[UBCM_PAYLOAD_DATA]); + if (payload_len > UBCM_MAX_NL_MSG_BUF_LEN) { + ubcm_log_err("Invalid payload_len: %u.\n", payload_len); + return NULL; + } + + send_buf = + kzalloc((size_t)(sizeof(struct ubmad_send_buf) + payload_len), + GFP_KERNEL); + if (send_buf == NULL) + return NULL; + + send_buf->payload_len = payload_len; + send_buf->msg_type = UBMAD_CONN_DATA; // using wk_jetty0 + + if (info->attrs[UBCM_SRC_ID]) + (void)memcpy(&send_buf->src_eid, + nla_data(info->attrs[UBCM_SRC_ID]), + UBCORE_EID_SIZE); + + if (info->attrs[UBCM_DST_ID]) + (void)memcpy(&send_buf->dst_eid, + nla_data(info->attrs[UBCM_DST_ID]), + UBCORE_EID_SIZE); + + if (info->attrs[UBCM_PAYLOAD_DATA]) + (void)memcpy(send_buf->payload, + nla_data(info->attrs[UBCM_PAYLOAD_DATA]), + payload_len); + + return send_buf; +} + +static int ubcm_genl_uvs_msg_handler(struct sk_buff *skb, + struct genl_info *info) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + struct ubmad_send_buf *send_buf; + struct ubcm_work *cm_work; + bool ret; + + send_buf = ubcm_get_nlmsg_send_buf(info); + if (send_buf == NULL) { + ubcm_log_err("Failed to get nlmsg send buffer.\n"); + return -1; + } + + cm_work = kzalloc(sizeof(struct ubcm_work), GFP_ATOMIC); + if (cm_work == NULL) { + kfree(send_buf); + return -ENOMEM; + } + cm_work->send_buf = send_buf; + + INIT_WORK(&cm_work->work, ubcm_work_handler); + /* queue_work() returns true if the work was queued, false if it was already pending */ + ret = queue_work(cm_ctx->wq, &cm_work->work); + if (!ret) { + kfree(cm_work); + kfree(send_buf); + ubcm_log_err("Cm work already in workqueue, ret: %u.\n", ret); + return -1; + } + + return 0; +} + +static struct ubmad_send_buf *ubcm_get_nlmsg_authn_buf(struct genl_info *info) +{ + struct ubmad_send_buf *send_buf; + + send_buf = kzalloc((size_t)(sizeof(struct ubmad_send_buf)), GFP_KERNEL); + if (send_buf == NULL) + return NULL; + send_buf->payload_len = 0; + send_buf->msg_type = UBMAD_AUTHN_DATA; // using wk_jetty1 + + if (info->attrs[UBCM_SRC_ID]) + (void)memcpy(&send_buf->src_eid, + nla_data(info->attrs[UBCM_SRC_ID]), +
UBCORE_EID_SIZE); + + if (info->attrs[UBCM_DST_ID]) + (void)memcpy(&send_buf->dst_eid, + nla_data(info->attrs[UBCM_DST_ID]), + UBCORE_EID_SIZE); + + return send_buf; +} + +static int ubcm_genl_uvs_authn_handler(struct sk_buff *skb, + struct genl_info *info) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + struct ubmad_send_buf *send_buf; + struct ubcm_work *cm_work; + bool ret; + + send_buf = ubcm_get_nlmsg_authn_buf(info); + if (send_buf == NULL) { + ubcm_log_err("Failed to get nlmsg authentication buffer.\n"); + return -1; + } + + cm_work = kzalloc(sizeof(struct ubcm_work), GFP_ATOMIC); + if (cm_work == NULL) { + kfree(send_buf); + return -ENOMEM; + } + cm_work->send_buf = send_buf; + + INIT_WORK(&cm_work->work, ubcm_work_handler); + /* return value: 1-work is executing in work-queue; 0-work is not executing */ + ret = queue_work(cm_ctx->wq, &cm_work->work); + if (!ret) { + kfree(cm_work); + kfree(send_buf); + ubcm_log_err("Cm work already in workqueue, ret: %u.\n", ret); + return -1; + } + + return 0; +} + +static struct ubcm_uvs_genl_node * +ubcm_lookup_node_by_portid_lockless(struct ubcm_uvs_list *uvs_list, + uint32_t portid) +{ + struct ubcm_uvs_genl_node *result = NULL; + struct ubcm_uvs_genl_node *node, *next; + + list_for_each_entry_safe(node, next, &uvs_list->list, list_node) { + if (node->genl_port == portid) { + result = node; + break; + } + } + + return result; +} + +static void ubcm_unset_genl_pid(uint32_t portid) +{ + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_genl_node *node; + + spin_lock(&uvs_list->lock); + node = ubcm_lookup_node_by_portid_lockless(uvs_list, portid); + if (node == NULL) { + spin_unlock(&uvs_list->lock); + return; + } + + list_del(&node->list_node); + spin_unlock(&uvs_list->lock); + + ubcm_log_err("Finish to unset port: %u for uvs: %s, id: %u.\n", portid, + node->name, node->id); + node->genl_port = UBCM_GENL_INVALID_PORT; + node->genl_sock = NULL; + /* free node buffer */ + ubcm_uvs_kref_put(node); +} + +static int ubcm_nl_notifier_call(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct netlink_notify *notify = data; + + if (action != NETLINK_URELEASE || notify == NULL || + notify->protocol != NETLINK_GENERIC) + return NOTIFY_DONE; + + ubcm_unset_genl_pid(notify->portid); + return NOTIFY_DONE; +} + +static void ubcm_uvs_list_init(void) +{ + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + + spin_lock_init(&uvs_list->lock); + INIT_LIST_HEAD(&uvs_list->list); + uvs_list->count = 0; + uvs_list->next_id = 1; /* 0 for invalid uvs id */ +} + +static void ubcm_uvs_list_uninit(void) +{ + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_genl_node *node, *next; + + spin_lock(&uvs_list->lock); + list_for_each_entry_safe(node, next, &uvs_list->list, list_node) { + list_del(&node->list_node); + kfree(node); + } + uvs_list->count = 0; + uvs_list->next_id = 0; + spin_unlock(&uvs_list->lock); +} + +int ubcm_genl_init(void) +{ + int ret; + + ubcm_uvs_list_init(); + ret = genl_register_family(&g_ubcm_genl_family); + if (ret != 0) + ubcm_log_err( + "Failed to init ubcm generic netlink family, ret: %d.\n", + ret); + + ret = netlink_register_notifier(&g_ubcm_nl_notifier); + if (ret != 0) + ubcm_log_err("Failed to register notifier, ret: %d.\n", ret); + + ubcm_log_info("Finish to init ubcm generic netlink.\n"); + return ret; +} + +void ubcm_genl_uninit(void) +{ + (void)netlink_unregister_notifier(&g_ubcm_nl_notifier); + (void)genl_unregister_family(&g_ubcm_genl_family); + ubcm_uvs_list_uninit(); +} + 
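The three helpers that follow (ubcm_alloc_genl_msg(), ubcm_find_get_uvs_by_eid() and ubcm_genl_unicast()) are the building blocks for relaying a received MAD payload up to the UVS daemon that registered the destination EID. A minimal caller sketch is shown below for orientation only: ubcm_forward_msg_to_uvs() is a hypothetical name introduced for this illustration and is not part of the patch; the real consumer is expected to live in the ubcm receive path (ub_cm.c), which is outside this hunk.

/* Illustrative sketch only, not part of the patch. */
static int ubcm_forward_msg_to_uvs(struct ubmad_recv_cr *recv_cr)
{
	struct ubcm_uvs_genl_node *uvs;
	struct ubcm_nlmsg *nlmsg;
	int ret;

	/* Wrap the received payload and local/remote EIDs into a genl message. */
	nlmsg = ubcm_alloc_genl_msg(recv_cr);
	if (nlmsg == NULL)
		return -ENOMEM;

	/* Look up the UVS that owns the destination EID; a kref is taken on success. */
	uvs = ubcm_find_get_uvs_by_eid(&nlmsg->dst_eid);
	if (uvs == NULL) {
		kfree(nlmsg);
		return -ENOENT;
	}

	/* Unicast to the UVS genl port; nla_put copies the data, so nlmsg can be freed afterwards. */
	ret = ubcm_genl_unicast(nlmsg, sizeof(*nlmsg) + nlmsg->payload_len, uvs);

	ubcm_uvs_kref_put(uvs);
	kfree(nlmsg);
	return ret;
}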
+struct ubcm_nlmsg *ubcm_alloc_genl_msg(struct ubmad_recv_cr *recv_cr) +{ + uint32_t payload_len = recv_cr->payload_len; + struct ubcm_nlmsg *nlmsg; + + nlmsg = kzalloc(sizeof(struct ubcm_nlmsg) + payload_len, GFP_KERNEL); + if (nlmsg == NULL) + return NULL; + + nlmsg->src_eid = recv_cr->cr->remote_id.eid; + nlmsg->dst_eid = recv_cr->local_eid; + nlmsg->msg_type = UBCM_CMD_UVS_MSG; + nlmsg->payload_len = payload_len; + (void)memcpy(nlmsg->payload, (const void *)recv_cr->payload, + payload_len); + nlmsg->nlmsg_seq = ubcm_get_nlmsg_seq(); + + return nlmsg; +} + +struct ubcm_nlmsg *ubcm_alloc_genl_authn_msg(struct ubmad_recv_cr *recv_cr) +{ + uint32_t payload_len = recv_cr->payload_len; + struct ubcm_nlmsg *nlmsg; + + if (payload_len != 0) { + ubcm_log_err("Invalid payload length: %u.\n", payload_len); + return ERR_PTR(-EINVAL); + } + nlmsg = kzalloc(sizeof(struct ubcm_nlmsg), GFP_KERNEL); + if (nlmsg == NULL) + return NULL; + + nlmsg->src_eid = recv_cr->cr->remote_id.eid; + nlmsg->dst_eid = recv_cr->local_eid; + nlmsg->msg_type = UBCM_CMD_UVS_AUTHN; + nlmsg->payload_len = payload_len; + nlmsg->nlmsg_seq = ubcm_get_nlmsg_seq(); + + return nlmsg; +} + +struct ubcm_uvs_genl_node *ubcm_find_get_uvs_by_eid(union ubcore_eid *eid) +{ + uint32_t hash = + jhash(eid, sizeof(union ubcore_eid), 0) % UBCM_EID_TABLE_SIZE; + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_genl_node *uvs, *next_uvs; + struct ubcm_uvs_eid_node *node; + struct hlist_node *next_node; + + spin_lock(&uvs_list->lock); + list_for_each_entry_safe(uvs, next_uvs, &uvs_list->list, list_node) { + if (IS_ERR_OR_NULL(uvs) || uvs->eid_cnt == 0) + continue; + hlist_for_each_entry_safe(node, next_node, + &uvs->eid_hlist[hash], node) { + if (memcmp(&node->eid, eid, sizeof(union ubcore_eid)) == + 0) { + ubcm_uvs_kref_get(uvs); + spin_unlock(&uvs_list->lock); + ubcm_log_info("Find uvs: %s by eid: " EID_FMT + ".\n", + uvs->name, EID_ARGS(*eid)); + return uvs; + } + } + } + spin_unlock(&uvs_list->lock); + ubcm_log_err("Failed to find uvs by eid: " EID_FMT ".\n", + EID_ARGS(*eid)); + + return NULL; +} + +int ubcm_genl_unicast(struct ubcm_nlmsg *msg, uint32_t len, + struct ubcm_uvs_genl_node *uvs) +{ + struct sk_buff *nl_skb; + struct nlmsghdr *nlh; + int ret; + + if (msg == NULL || uvs->genl_sock == NULL || + uvs->genl_port == UBCM_GENL_INVALID_PORT) { + ubcm_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + /* create sk_buff */ + nl_skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (nl_skb == NULL) { + ubcm_log_err("Failed to creatge nl_skb.\n"); + return -1; + } + + /* set genl head */ + nlh = genlmsg_put(nl_skb, uvs->genl_port, msg->nlmsg_seq, + &g_ubcm_genl_family, NLM_F_ACK, + (uint8_t)msg->msg_type); + if (nlh == NULL) { + ubcm_log_err("Failed to nlmsg put.\n"); + nlmsg_free(nl_skb); + return -1; + } + if (nla_put_u32(nl_skb, UBCM_MSG_SEQ, msg->nlmsg_seq) || + nla_put_u32(nl_skb, UBCM_MSG_TYPE, msg->msg_type) || + nla_put(nl_skb, UBCM_SRC_ID, (int)sizeof(union ubcore_eid), + &msg->src_eid) || + nla_put(nl_skb, UBCM_DST_ID, (int)sizeof(union ubcore_eid), + &msg->dst_eid) || + nla_put(nl_skb, UBCM_PAYLOAD_DATA, (int)msg->payload_len, + msg->payload)) { + ubcm_log_err("Failed in nla_put operations.\n"); + nlmsg_free(nl_skb); + return -1; + } + + genlmsg_end(nl_skb, nlh); + ubcm_log_info("Finish to send genl msg, seq: %u, payload_len: %u.\n", + msg->nlmsg_seq, msg->payload_len); + + ret = nlmsg_unicast(uvs->genl_sock, nl_skb, uvs->genl_port); + if (ret != 0) { + ubcm_log_err("Failed to send genl msg, ret: 
%d.\n", ret); + nlmsg_free(nl_skb); + return ret; + } + + return 0; +} diff --git a/drivers/ub/urma/ubcore/ubcm/ubcm_genl.h b/drivers/ub/urma/ubcore/ubcm/ubcm_genl.h new file mode 100644 index 000000000000..00fef1761843 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ubcm_genl.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcm generic netlink header + * Author: Chen Yutao + * Create: 2025-01-10 + * Note: + * History: 2025-01-10: Create file + */ + +#ifndef UBCM_GENL_H +#define UBCM_GENL_H + +#include + +#include + +#include "ub_mad.h" + +/* NETLINK_GENERIC related info */ +#define UBCM_GENL_FAMILY_NAME "UBCM_GENL" +#define UBCM_GENL_FAMILY_VERSION 1 +#define UBCM_GENL_INVALID_PORT 0 +#define UBCM_MAX_NL_MSG_BUF_LEN 1024 +#define UBCM_EID_TABLE_SIZE 256 /* Refer to TPSA_EID_IDX_TABLE_SIZE */ + +#define UBCM_MAX_UVS_NAME_LEN 64 + +enum ubcm_uvs_state { UBCM_UVS_STATE_DEAD = 0, UBCM_UVS_STATE_ALIVE }; + +enum ubcm_genl_attr { /* Refer to enum uvs_cm_genl_attr */ + UBCM_ATTR_UNSPEC, + UBCM_HDR_COMMAND, + UBCM_HDR_ARGS_LEN, + UBCM_HDR_ARGS_ADDR, + UBCM_ATTR_NS_MODE, + UBCM_ATTR_DEV_NAME, + UBCM_ATTR_NS_FD, + UBCM_MSG_SEQ, + UBCM_MSG_TYPE, + UBCM_SRC_ID, + UBCM_DST_ID, + UBCM_RESERVED, + UBCM_PAYLOAD_DATA, + UBCM_ATTR_AFTER_LAST, + NUM_UBCM_ATTR = UBCM_ATTR_AFTER_LAST, + UBCM_ATTR_MAX = UBCM_ATTR_AFTER_LAST - 1 +}; + +/* Handling generic netlnik messages from UVS, only forward messages */ +enum ubcm_genl_msg_type { + UBCM_CMD_UVS_ADD = 0, + UBCM_CMD_UVS_REMOVE, + UBCM_CMD_UVS_ADD_EID, + UBCM_CMD_UVS_DEL_EID, + UBCM_CMD_UVS_MSG, + UBCM_CMD_UVS_AUTHN, /* Authentication */ + UBCM_CMD_NUM +}; + +struct ubcm_nlmsg { /* Refer to uvs_nl_cm_msg_t */ + uint32_t nlmsg_seq; + uint32_t msg_type; /* Refer to ubcm_genl_msg_type */ + union ubcore_eid src_eid; + union ubcore_eid dst_eid; + uint32_t payload_len; + uint32_t reserved; + uint8_t payload[]; +}; + +struct ubcm_uvs_eid_node { + struct hlist_node node; + uint32_t eid_idx; + uint32_t reserved; + union ubcore_eid eid; +}; + +struct ubcm_uvs_genl_node { + struct list_head list_node; + struct kref ref; + char name[UBCM_MAX_UVS_NAME_LEN]; /* name to identify UVS */ + enum ubcm_uvs_state state; + uint32_t id; + uint32_t policy; + uint32_t genl_port; /* uvs genl port */ + struct sock *genl_sock; + uint32_t pid; + atomic_t map2ue; + atomic_t nl_wait_buffer; + struct hlist_head eid_hlist + [UBCM_EID_TABLE_SIZE]; /* Storing struct ubcm_uvs_eid_node */ + uint32_t eid_cnt; +}; + +/* Payload structure for UBCM_CMD_UVS_ADD_EID or UBCM_CMD_UVS_DEL_EID */ +struct ubcm_nlmsg_op_eid { + uint32_t eid_idx; + uint32_t reserved; + union ubcore_eid eid; + char uvs_name[UBCM_MAX_UVS_NAME_LEN]; +}; + +extern atomic_t g_ubcm_nlmsg_seq; +static inline uint32_t ubcm_get_nlmsg_seq(void) +{ + return atomic_inc_return(&g_ubcm_nlmsg_seq); +} + +int ubcm_genl_init(void); +void ubcm_genl_uninit(void); + +struct ubcm_nlmsg *ubcm_alloc_genl_msg(struct ubmad_recv_cr *recv_cr); +struct ubcm_nlmsg 
*ubcm_alloc_genl_authn_msg(struct ubmad_recv_cr *recv_cr); + +void ubcm_uvs_kref_get(struct ubcm_uvs_genl_node *node); +void ubcm_uvs_kref_put(struct ubcm_uvs_genl_node *node); + +struct ubcm_uvs_genl_node *ubcm_find_get_uvs_by_eid(union ubcore_eid *eid); + +static inline uint32_t ubcm_nlmsg_len(struct ubcm_nlmsg *msg) +{ + return sizeof(struct ubcm_nlmsg) + msg->payload_len; +} + +/* Ubcm send nlmsg to UVS by netlink */ +int ubcm_genl_unicast(struct ubcm_nlmsg *msg, uint32_t len, + struct ubcm_uvs_genl_node *uvs); + +#endif /* UBCM_GENL_H */ diff --git a/drivers/ub/urma/ubcore/ubcm/ubcm_log.c b/drivers/ub/urma/ubcore/ubcm/ubcm_log.c new file mode 100644 index 000000000000..4fc732fe7587 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ubcm_log.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcm log file + * Author: Qian Guoxin + * Create: 2025-01-10 + * Note: + * History: 2024-01-10: Create file + */ + +#include +#include "ubcm_log.h" + +uint32_t g_ubcm_log_level = UBCM_LOG_LEVEL_INFO; diff --git a/drivers/ub/urma/ubcore/ubcm/ubcm_log.h b/drivers/ub/urma/ubcore/ubcm/ubcm_log.h new file mode 100644 index 000000000000..048c5cdfcb92 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ubcm_log.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcm log head file + * Author: Qian Guoxin + * Create: 2025-01-10 + * Note: + * History: 2025-01-10: Create file + */ + +#ifndef UBCM_LOG_H +#define UBCM_LOG_H + +#include +#include + +enum ubcm_log_level { + UBCM_LOG_LEVEL_EMERG = 0, + UBCM_LOG_LEVEL_ALERT = 1, + UBCM_LOG_LEVEL_CRIT = 2, + UBCM_LOG_LEVEL_ERR = 3, + UBCM_LOG_LEVEL_WARNING = 4, + UBCM_LOG_LEVEL_NOTICE = 5, + UBCM_LOG_LEVEL_INFO = 6, + UBCM_LOG_LEVEL_DEBUG = 7, + UBCM_LOG_LEVEL_MAX = 8 +}; + +/* add log head info, "LogTag_UBCM|function|[line]| */ +#define UBCM_LOG_TAG "LogTag_UBCM" +#define ubcm_log(l, format, args...) \ + pr_##l("%s|%s:[%d]|" format, UBCM_LOG_TAG, __func__, __LINE__, ##args) + +#define UBCM_RATELIMIT_INTERVAL (5 * HZ) +#define UBCM_RATELIMIT_BURST 100 + +extern uint32_t g_ubcm_log_level; + +#define ubcm_log_info(...) \ + do { \ + if (g_ubcm_log_level >= UBCM_LOG_LEVEL_INFO) \ + ubcm_log(info, __VA_ARGS__); \ + } while (0) + +#define ubcm_log_err(...) \ + do { \ + if (g_ubcm_log_level >= UBCM_LOG_LEVEL_ERR) \ + ubcm_log(err, __VA_ARGS__); \ + } while (0) + +#define ubcm_log_warn(...) 
\ + do { \ + if (g_ubcm_log_level >= UBCM_LOG_LEVEL_WARNING) \ + ubcm_log(warn, __VA_ARGS__); \ + } while (0) + +/* No need to record debug log by printk_ratelimited */ +#define ubcm_log_debug(...) \ + do { \ + if (g_ubcm_log_level >= UBCM_LOG_LEVEL_DEBUG) \ + ubcm_log(debug, __VA_ARGS__); \ + } while (0) + +/* Rate Limited log to avoid soft lockup crash by quantities of printk */ +/* Current limit is 100 log every 5 seconds */ +#define ubcm_log_info_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBCM_RATELIMIT_INTERVAL, \ + UBCM_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubcm_log_level >= UBCM_LOG_LEVEL_INFO)) \ + ubcm_log(info, __VA_ARGS__); \ + } while (0) + +#define ubcm_log_err_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBCM_RATELIMIT_INTERVAL, \ + UBCM_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubcm_log_level >= UBCM_LOG_LEVEL_ERR)) \ + ubcm_log(err, __VA_ARGS__); \ + } while (0) + +#define ubcm_log_warn_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBCM_RATELIMIT_INTERVAL, \ + UBCM_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubcm_log_level >= UBCM_LOG_LEVEL_WARNING)) \ + ubcm_log(warn, __VA_ARGS__); \ + } while (0) + +#endif /* UBCM_LOG_H */ diff --git a/drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c b/drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c new file mode 100644 index 000000000000..1485b0dfb54a --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c @@ -0,0 +1,1210 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ub_mad datapath implementation + * Author: Chen Yutao + * Create: 2025-02-21 + * Note: + * History: 2025-02-21: create file + */ + +#include +#include + +#include +#include "ubcore_topo_info.h" +#include "net/ubcore_cm.h" +#include "ubcm_log.h" + +#include "ub_mad_priv.h" + +/** reliable communication **/ +/* msn mgr */ +void ubmad_init_msn_mgr(struct ubmad_msn_mgr *msn_mgr) +{ + uint32_t idx; + + for (idx = 0; idx < UBMAD_MSN_HLIST_SIZE; idx++) + INIT_HLIST_HEAD(&msn_mgr->msn_hlist[idx]); + spin_lock_init(&msn_mgr->msn_hlist_lock); + atomic64_set(&msn_mgr->msn_gen, 0); // msn starts from 0 +} + +void ubmad_uninit_msn_mgr(struct ubmad_msn_mgr *msn_mgr) +{ + struct ubmad_msn_node *msn_node; + struct hlist_node *next; + unsigned long flag; + uint32_t idx; + + spin_lock_irqsave(&msn_mgr->msn_hlist_lock, flag); + for (idx = 0; idx < UBMAD_MSN_HLIST_SIZE; idx++) { + hlist_for_each_entry_safe(msn_node, next, + &msn_mgr->msn_hlist[idx], node) { + hlist_del(&msn_node->node); + kfree(msn_node); + } + } + spin_unlock_irqrestore(&msn_mgr->msn_hlist_lock, flag); +} + +/* msn node */ +static struct ubmad_msn_node * +ubmad_create_msn_node(uint64_t msn, struct ubmad_msn_mgr *msn_mgr) +{ + struct ubmad_msn_node *msn_node; + unsigned long flag; + uint32_t hash; + + msn_node = kzalloc(sizeof(struct ubmad_msn_node), GFP_KERNEL); + if (IS_ERR_OR_NULL(msn_node)) + return ERR_PTR(-ENOMEM); + + msn_node->msn = msn; + INIT_HLIST_NODE(&msn_node->node); + + hash = jhash(&msn, sizeof(uint64_t), 0) % UBMAD_MSN_HLIST_SIZE; + spin_lock_irqsave(&msn_mgr->msn_hlist_lock, flag); + hlist_add_head(&msn_node->node, &msn_mgr->msn_hlist[hash]); + spin_unlock_irqrestore(&msn_mgr->msn_hlist_lock, flag); + + return msn_node; +} + +static void ubmad_destroy_msn_node(struct ubmad_msn_node *msn_node, + struct ubmad_msn_mgr *msn_mgr) +{ + unsigned long flag; + + spin_lock_irqsave(&msn_mgr->msn_hlist_lock, flag); + hlist_del(&msn_node->node); + kfree(msn_node); + spin_unlock_irqrestore(&msn_mgr->msn_hlist_lock, flag); +} + +/* retransmission work */ +static void ubmad_rt_work_handler(struct work_struct *work) +{ + struct delayed_work *delay_work = + container_of(work, struct delayed_work, work); + struct ubmad_rt_work *rt_work = + container_of(delay_work, struct ubmad_rt_work, delay_work); + + struct ubmad_msn_mgr *msn_mgr = rt_work->msn_mgr; + unsigned long flag; + struct ubmad_msn_node *cur; + struct hlist_node *next; + uint32_t hash = jhash(&rt_work->msn, sizeof(uint64_t), 0) % + UBMAD_MSN_HLIST_SIZE; + bool found = false; + + struct ubmad_msg *msg = rt_work->msg; + uint64_t sge_addr = (uint64_t)msg; + uint32_t sge_idx; + struct ubmad_jetty_resource *rsrc = rt_work->rsrc; + + // try to find msn_node + spin_lock_irqsave(&msn_mgr->msn_hlist_lock, flag); + hlist_for_each_entry_safe(cur, next, &msn_mgr->msn_hlist[hash], node) { + if (cur->msn == rt_work->msn) { + found = true; + break; + } + } + spin_unlock_irqrestore(&msn_mgr->msn_hlist_lock, flag); + + // found indicates not ack. Need to repost + if (found && rt_work->rt_cnt <= UBMAD_MAX_RETRY_CNT) { + rt_work->rt_cnt++; + if (ubmad_repost_send(msg, rt_work->tjetty, rsrc->send_seg, + rt_work->rt_wq, rsrc) == 0) + return; + ubcm_log_err("repost send failed. 
msg type %d msn %llu\n", + msg->msg_type, msg->msn); + } + ubcm_log_info("Do not repost, found: %u, rt_work->rt_cnt: %u.\n", + (uint32_t)found, rt_work->rt_cnt); + + /* not found OR repost failed + * put data msg sge id + */ + if (sge_addr < rsrc->send_seg->seg.ubva.va) { + ubcm_log_err("sge addr should not < seg addr\n"); + } else { + sge_idx = (sge_addr - rsrc->send_seg->seg.ubva.va) / + UBMAD_SGE_MAX_LEN; + ubmad_bitmap_put_id(rsrc->send_seg_bitmap, + sge_idx); // get in ubmad_do_post_send() + } + kfree(rt_work); +} + +struct ubmad_rt_work *ubmad_create_rt_work(struct workqueue_struct *rt_wq, + struct ubmad_msn_mgr *msn_mgr, + struct ubmad_msg *msg, + struct ubmad_tjetty *tjetty, + struct ubmad_jetty_resource *rsrc) +{ + struct ubmad_rt_work *rt_work; + + rt_work = kzalloc(sizeof(struct ubmad_rt_work), + GFP_KERNEL); // free in ubmad_rt_work_handler() + if (IS_ERR_OR_NULL(rt_work)) + return ERR_PTR(-ENOMEM); + rt_work->msn = msg->msn; + rt_work->msn_mgr = msn_mgr; + rt_work->msg = msg; + rt_work->tjetty = tjetty; + rt_work->rsrc = rsrc; + rt_work->rt_wq = rt_wq; + rt_work->rt_cnt = 0; + + INIT_DELAYED_WORK(&rt_work->delay_work, ubmad_rt_work_handler); + if (queue_delayed_work(rt_wq, &rt_work->delay_work, + UBMAD_RETRANSMIT_PERIOD) != true) { + ubcm_log_err("queue rt work failed\n"); + kfree(rt_work); + return NULL; + } + + return rt_work; +} + +/* seid_node */ +static struct ubmad_seid_node * +ubmad_get_seid_node(union ubcore_eid *seid, struct ubmad_jetty_resource *rsrc) +{ + unsigned long flag; + struct ubmad_seid_node *cur; + struct hlist_node *next; + uint32_t hash = + jhash(seid, sizeof(union ubcore_eid), 0) % UBMAD_MAX_SEID_NUM; + + spin_lock_irqsave(&rsrc->seid_hlist_lock, flag); + hlist_for_each_entry_safe(cur, next, &rsrc->seid_hlist[hash], node) { + if (memcmp(&cur->seid, seid, sizeof(union ubcore_eid)) == 0) { + kref_get(&cur->kref); + spin_unlock_irqrestore(&rsrc->seid_hlist_lock, flag); + return cur; + } + } + spin_unlock_irqrestore(&rsrc->seid_hlist_lock, flag); + + return NULL; +} + +static void ubmad_release_seid_node(struct kref *kref) +{ + struct ubmad_seid_node *seid_node = + container_of(kref, struct ubmad_seid_node, kref); + struct ubmad_bitmap *rx_bitmap = seid_node->rx_bitmap; + + if (rx_bitmap) + ubmad_destroy_bitmap(rx_bitmap); + kfree(seid_node); +} + +static void ubmad_put_seid_node(struct ubmad_seid_node *seid_node) +{ + kref_put(&seid_node->kref, ubmad_release_seid_node); +} + +/* need to put twice to release seid_node. + * First put for kref_get() is called by user after using created seid_node. + * Second put for kref_init() is in ubmad_uninit_seid_hlist(). 
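+ * (I.e. ubmad_get_seid_node()/ubmad_create_seid_node() return the node with an extra kref that
+ * the caller drops via ubmad_put_seid_node(); the hlist keeps the kref_init() reference until
+ * ubmad_delete_seid_node() or ubmad_uninit_seid_hlist() removes the node.)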
+ */ +static struct ubmad_seid_node * +ubmad_create_seid_node(union ubcore_eid *seid, + struct ubmad_jetty_resource *rsrc) +{ + struct ubmad_seid_node *seid_node; + uint32_t hash; + unsigned long flag; + + seid_node = kzalloc(sizeof(struct ubmad_seid_node), GFP_KERNEL); + if (IS_ERR_OR_NULL(seid_node)) + return ERR_PTR(-ENOMEM); + + seid_node->rx_bitmap = ubmad_create_bitmap(UBMAD_RX_BITMAP_SIZE); + if (IS_ERR_OR_NULL(seid_node->rx_bitmap)) { + kfree(seid_node); + return ERR_PTR(-ENOMEM); + } + + seid_node->seid = *seid; + INIT_HLIST_NODE(&seid_node->node); + kref_init(&seid_node->kref); + atomic64_set(&seid_node->expected_msn, 0); + + hash = jhash(seid, sizeof(union ubcore_eid), 0) % UBMAD_MAX_SEID_NUM; + spin_lock_irqsave(&rsrc->seid_hlist_lock, flag); + hlist_add_head(&seid_node->node, &rsrc->seid_hlist[hash]); + kref_get(&seid_node->kref); // put by user outside this func + spin_unlock_irqrestore(&rsrc->seid_hlist_lock, flag); + + return seid_node; +} + +static void ubmad_delete_seid_node(union ubcore_eid *seid, + struct ubmad_jetty_resource *rsrc) +{ + uint32_t hash = + jhash(seid, sizeof(union ubcore_eid), 0) % UBMAD_MAX_SEID_NUM; + struct ubmad_seid_node *cur; + struct hlist_node *next; + unsigned long flag; + + spin_lock_irqsave(&rsrc->seid_hlist_lock, flag); + hlist_for_each_entry_safe(cur, next, &rsrc->seid_hlist[hash], node) { + if (memcmp(&cur->seid, seid, sizeof(union ubcore_eid)) == 0) { + hlist_del(&cur->node); + ubmad_put_seid_node(cur); + } + } + spin_unlock_irqrestore(&rsrc->seid_hlist_lock, flag); +} + +void ubmad_init_seid_hlist(struct ubmad_jetty_resource *rsrc) +{ + uint32_t idx; + + for (idx = 0; idx < UBMAD_MSN_HLIST_SIZE; idx++) + INIT_HLIST_HEAD(&rsrc->seid_hlist[idx]); + spin_lock_init(&rsrc->seid_hlist_lock); +} + +void ubmad_uninit_seid_hlist(struct ubmad_jetty_resource *rsrc) +{ + struct ubmad_seid_node *seid_node; + struct hlist_node *next; + unsigned long flag; + uint32_t idx; + + spin_lock_irqsave(&rsrc->seid_hlist_lock, flag); + for (idx = 0; idx < UBMAD_MAX_SEID_NUM; idx++) { + hlist_for_each_entry_safe(seid_node, next, + &rsrc->seid_hlist[idx], node) { + hlist_del(&seid_node->node); + ubmad_put_seid_node(seid_node); + } + } + spin_unlock_irqrestore(&rsrc->seid_hlist_lock, flag); +} + +/** post **/ +// prepare msg to send +static int ubmad_prepare_msg(uint64_t sge_addr, struct ubmad_send_buf *send_buf, + uint32_t msn, struct ubcore_jetty *jetty, + struct ubmad_tjetty *tjetty) +{ + struct ubmad_msg *msg = (struct ubmad_msg *)sge_addr; + + if (sizeof(struct ubmad_msg) + send_buf->payload_len > + UBMAD_SGE_MAX_LEN) { + ubcm_log_err( + "msg header %lu + payload_len %u exceeds sge max length %u\n", + sizeof(struct ubmad_msg), send_buf->payload_len, + UBMAD_SGE_MAX_LEN); + return -EINVAL; + } + if (send_buf->msg_type == UBMAD_AUTHN_DATA && + send_buf->payload_len != 0) { + ubcm_log_err("Invalid authentication payload_len %u\n", + send_buf->payload_len); + return -EINVAL; + } + + msg->version = UBMAD_MSG_VERSION_0; + msg->msn = msn; + msg->msg_type = send_buf->msg_type; + msg->payload_len = send_buf->payload_len; + if (send_buf->msg_type == UBMAD_CONN_DATA || + send_buf->msg_type == UBMAD_UBC_CONN_DATA) + // send_buf will be freed by cm. mad needs to memcpy user data to send sge. 
+ (void)memcpy((void *)msg->payload, send_buf->payload, + send_buf->payload_len); + + return 0; +} + +static int ubmad_do_post_send_conn_data(struct ubcore_jetty *jetty, + struct ubmad_tjetty *tjetty, + struct ubcore_jfs_wr *jfs_wr, + struct workqueue_struct *rt_wq, + struct ubmad_jetty_resource *rsrc) +{ + uint64_t sge_addr = jfs_wr->send.src.sge->addr; + struct ubmad_msg *msg = (struct ubmad_msg *)sge_addr; + uint64_t msn = msg->msn; + union ubcore_eid *dst_eid = &tjetty->tjetty->cfg.id.eid; + + struct ubmad_msn_node *msn_node; + struct ubcore_jfs_wr *jfs_bad_wr = NULL; + + int ret; + + /* create msn_node before post to avoid recv ack before msn_node created and wrongly trigger + * fast retransmission. + */ + msn_node = ubmad_create_msn_node(msn, &tjetty->msn_mgr); + if (IS_ERR_OR_NULL(msn_node)) { + ubcm_log_err("create msn_node failed. msn %llu eid " EID_FMT + "\n", + msn, EID_ARGS(*dst_eid)); + return -1; + } + + if (atomic_fetch_add(1, &rsrc->tx_in_queue) >= UBMAD_TX_THREDSHOLD) { + atomic_fetch_sub(1, &rsrc->tx_in_queue); + ubcm_log_err("Invalid threshold, tx_in_queue: %u.\n", + (uint32_t)atomic_read(&rsrc->tx_in_queue)); + ret = -1; + goto destroy_msn_node; + } + ret = ubcore_post_jetty_send_wr(jetty, jfs_wr, &jfs_bad_wr); + if (ret != 0) { + ubcm_log_err("ubcore post send failed. msn %llu eid " EID_FMT + "\n", + msn, EID_ARGS(*dst_eid)); + atomic_fetch_sub(1, &rsrc->tx_in_queue); + goto destroy_msn_node; + } + + ubcm_log_info("send conn data successfully. msn %llu eid " EID_FMT "\n", + msn, EID_ARGS(*dst_eid)); + return 0; + +destroy_msn_node: + ubmad_destroy_msn_node(msn_node, &tjetty->msn_mgr); + return ret; +} + +static int ubmad_do_post_send(struct ubmad_jetty_resource *rsrc, + struct ubmad_tjetty *tjetty, + struct ubmad_send_buf *send_buf, uint64_t msn, + struct workqueue_struct *rt_wq) +{ + uint32_t sge_idx; + uint64_t sge_addr; + struct ubcore_jetty *jetty = rsrc->jetty; + struct ubmad_msg *msg; + + struct ubcore_sge sge = { 0 }; + struct ubcore_jfs_wr jfs_wr = { 0 }; + + int ret; + + /* prepare */ + /* get sge + * data msg sge id put in ubmad_rt_work_handler(). + * ack sge id put in ubmad_send_work_handler() + */ + sge_idx = ubmad_bitmap_get_id(rsrc->send_seg_bitmap); + if (sge_idx >= rsrc->send_seg_bitmap->size) { + ubcm_log_err("get sge_idx failed\n"); + return -1; + } + sge_addr = rsrc->send_seg->seg.ubva.va + UBMAD_SGE_MAX_LEN * sge_idx; + + // prepare msg, msg stored in sge + ret = ubmad_prepare_msg(sge_addr, send_buf, msn, jetty, tjetty); + if (ret != 0) { + ubcm_log_err("prepare msg failed. 
ret %d payload_len %u\n", ret, + send_buf->payload_len); + goto put_id; + } + + // prepare wr + jfs_wr.opcode = UBCORE_OPC_SEND; + jfs_wr.tjetty = tjetty->tjetty; + sge.addr = sge_addr; + sge.len = send_buf->payload_len + + sizeof(struct ubmad_msg); // only need to send data len + sge.tseg = rsrc->send_seg; + jfs_wr.send.src.sge = &sge; + jfs_wr.send.src.num_sge = 1; + jfs_wr.user_ctx = sge_addr; + jfs_wr.flag.bs.complete_enable = 1; + + /* post */ + msg = (struct ubmad_msg *)sge_addr; + switch (msg->msg_type) { + case UBMAD_CONN_DATA: + case UBMAD_UBC_CONN_DATA: + ret = ubmad_do_post_send_conn_data(jetty, tjetty, &jfs_wr, + rt_wq, rsrc); + break; + case UBMAD_CONN_ACK: + case UBMAD_UBC_CONN_ACK: + case UBMAD_AUTHN_DATA: + case UBMAD_AUTHN_ACK: + ubcm_log_warn_rl("No need to send ack, msg->msg_type: %d", + (int)msg->msg_type); + ret = -1; + break; + default: + ubcm_log_err("invalid msg_type %d\n", msg->msg_type); + ret = -EINVAL; + } + if (ret != 0) { + ubcm_log_err("post send failed. msg type %d ret %d\n", + msg->msg_type, ret); + goto put_id; + } + + return 0; + +put_id: + (void)ubmad_bitmap_put_id(rsrc->send_seg_bitmap, sge_idx); + return ret; +} + +// for UBMAD_CONN_DATA, UBMAD_AUTHN_DATA +int ubmad_post_send(struct ubcore_device *device, + struct ubmad_send_buf *send_buf, + struct ubmad_send_buf **bad_send_buf) +{ + struct ubmad_device_priv *dev_priv = NULL; + struct ubmad_jetty_resource *rsrc; + struct ubcore_jetty *wk_jetty; // well-known jetty + struct ubmad_tjetty *wk_tjetty; + union ubcore_eid dst_primary_eid = { 0 }; + int ret; + + dev_priv = ubmad_get_device_priv(device); // put below + if (IS_ERR_OR_NULL(dev_priv)) { + ubcm_log_err("Failed to get dev_priv, dev_name: %s.\n", + device->dev_name); + return -1; + } + if (!dev_priv->valid) { + ubcm_log_err("dev_priv rsrc not inited. dev_name: %s.\n", + device->dev_name); + ret = -1; + goto put_device_priv; + } + + switch (send_buf->msg_type) { + case UBMAD_CONN_DATA: + case UBMAD_UBC_CONN_DATA: + rsrc = &dev_priv->jetty_rsrc[0]; + break; + case UBMAD_AUTHN_DATA: + rsrc = &dev_priv->jetty_rsrc[1]; + break; + default: + ubcm_log_err("Invalid msg_type: %d.\n", + (int)send_buf->msg_type); + ret = -EINVAL; + goto put_device_priv; + } + wk_jetty = rsrc->jetty; + + /* import well-known jetty */ + // unimport in ubmad_uninit_jetty_rsrc() + ret = ubcore_get_primary_eid(&send_buf->dst_eid, &dst_primary_eid); + if (ret != 0) { + ubcm_log_err("get primary eid failed\n"); + goto put_device_priv; + } + wk_tjetty = ubmad_import_jetty(device, rsrc, &dst_primary_eid); + if (IS_ERR_OR_NULL(wk_tjetty)) { + ubcm_log_err("import jetty failed. eid " EID_FMT "\n", + EID_ARGS(dst_primary_eid)); + ret = -1; + goto put_device_priv; + } + + /* post send */ + ret = ubmad_do_post_send( + rsrc, wk_tjetty, send_buf, + atomic64_fetch_inc(&wk_tjetty->msn_mgr.msn_gen), + dev_priv->rt_wq); + + ubmad_put_tjetty(wk_tjetty); // first put for ubmad_import_jetty() above +put_device_priv: + ubmad_put_device_priv(dev_priv); // get above + return ret; +} + +// post send UBMAD_CONN_ACK when recv conn data +int ubmad_post_send_conn_ack(struct ubmad_jetty_resource *rsrc, + struct ubmad_tjetty *tjetty, uint64_t msn) +{ + struct ubmad_send_buf send_buf = { 0 }; + + send_buf.src_eid = rsrc->jetty->jetty_id.eid; + send_buf.dst_eid = tjetty->tjetty->cfg.id.eid; + send_buf.msg_type = UBMAD_CONN_ACK; + + if (ubmad_do_post_send(rsrc, tjetty, &send_buf, msn, NULL) != 0) { + ubcm_log_err("post send conn ack failed. 
dst_eid " EID_FMT + ", msn %llu\n", + EID_ARGS(send_buf.dst_eid), msn); + return -1; + } + + return 0; +} + +/* repost send for retransmission of UBMAD_CONN_DATA / UBMAD_UBC_CONN_DATA */ +int ubmad_repost_send_conn_data(struct ubcore_jetty *jetty, + struct ubmad_tjetty *tjetty, + struct ubcore_jfs_wr *jfs_wr, + struct workqueue_struct *rt_wq, + struct ubmad_jetty_resource *rsrc) +{ + uint64_t sge_addr = jfs_wr->send.src.sge->addr; + struct ubmad_msg *msg = (struct ubmad_msg *)sge_addr; + uint64_t msn = msg->msn; + union ubcore_eid *dst_eid = &tjetty->tjetty->cfg.id.eid; + + struct ubcore_jfs_wr *jfs_bad_wr = NULL; + struct ubmad_rt_work *rt_work; + + int ret = -1; + + if (atomic_fetch_add(1, &rsrc->tx_in_queue) >= UBMAD_TX_THREDSHOLD) { + atomic_fetch_sub(1, &rsrc->tx_in_queue); + ubcm_log_err("Invalid threshold.\n"); + return -1; + } + ret = ubcore_post_jetty_send_wr(jetty, jfs_wr, &jfs_bad_wr); + if (ret != 0) { + ubcm_log_err("ubcore post send failed. msn %llu eid " EID_FMT + "\n", + msn, EID_ARGS(*dst_eid)); + atomic_fetch_sub(1, &rsrc->tx_in_queue); + return ret; + } + + // create rt_work after post to avoid rt_work handled before first post. + rt_work = ubmad_create_rt_work(rt_wq, &tjetty->msn_mgr, msg, tjetty, + rsrc); + if (IS_ERR_OR_NULL(rt_work)) { + ubcm_log_err("create rt_work failed. msn %llu eid " EID_FMT + "\n", + msn, EID_ARGS(*dst_eid)); + return -1; + } + + ubcm_log_info("send conn data successfully. msn %llu eid " EID_FMT "\n", + msn, EID_ARGS(*dst_eid)); + return 0; +} + +int ubmad_repost_send(struct ubmad_msg *msg, struct ubmad_tjetty *tjetty, + struct ubcore_target_seg *send_seg, + struct workqueue_struct *rt_wq, + struct ubmad_jetty_resource *rsrc) +{ + union ubcore_eid *dst_eid = &tjetty->tjetty->cfg.id.eid; + uint64_t sge_addr = (uint64_t)msg; + struct ubcore_sge sge = { 0 }; + struct ubcore_jfs_wr jfs_wr = { 0 }; + int ret; + + ubcm_log_info("timeout and repost. msn %llu eid " EID_FMT "\n", + msg->msn, EID_ARGS(*dst_eid)); + + // prepare wr + jfs_wr.opcode = UBCORE_OPC_SEND; + jfs_wr.tjetty = tjetty->tjetty; + sge.addr = sge_addr; + sge.len = msg->payload_len + sizeof(struct ubmad_msg); + sge.tseg = send_seg; + jfs_wr.send.src.sge = &sge; + jfs_wr.send.src.num_sge = 1; + jfs_wr.user_ctx = sge_addr; + jfs_wr.flag.bs.complete_enable = 1; + (void)jfs_wr; + + /* post */ + switch (msg->msg_type) { + case UBMAD_CONN_DATA: + case UBMAD_UBC_CONN_DATA: + ubcm_log_err("Invalid msg_type: %d", (int)msg->msg_type); + ret = -1; + break; + default: + ubcm_log_err("invalid msg_type %d. msn %llu eid " EID_FMT "\n", + msg->msg_type, msg->msn, EID_ARGS(*dst_eid)); + return -EINVAL; + } + + if (ret != 0) { + ubcm_log_err( + "repost send failed. 
msg type %d msn %llu eid " EID_FMT + "\n", + msg->msg_type, msg->msn, EID_ARGS(*dst_eid)); + return ret; + } + + return 0; +} + +void ubmad_post_send_close_req(struct ubmad_jetty_resource *rsrc, + struct ubcore_tjetty *tjetty) +{ + struct ubcore_jfs_wr *jfs_bad_wr = NULL; + struct ubcore_jfs_wr jfs_wr = { 0 }; + struct ubcore_sge sge = { 0 }; + struct ubmad_msg *msg; + uint64_t sge_addr; + uint32_t sge_idx; + int ret; + + if (atomic_fetch_add(1, &rsrc->tx_in_queue) >= UBMAD_TX_THREDSHOLD) { + atomic_fetch_sub(1, &rsrc->tx_in_queue); + ubcm_log_err("Invalid threshold, tx_in_queue: %u.\n", + (uint32_t)atomic_read(&rsrc->tx_in_queue)); + return; + } + + sge_idx = ubmad_bitmap_get_id(rsrc->send_seg_bitmap); + if (sge_idx >= rsrc->send_seg_bitmap->size) { + atomic_fetch_sub(1, &rsrc->tx_in_queue); + ubcm_log_err("Failed to get sge_idx: %u.\n", sge_idx); + return; + } + + sge_addr = rsrc->send_seg->seg.ubva.va + UBMAD_SGE_MAX_LEN * sge_idx; + msg = (struct ubmad_msg *)sge_addr; + msg->version = UBMAD_MSG_VERSION_0; + msg->msg_type = UBMAD_CLOSE_REQ; + msg->payload_len = 0; + msg->msn = 0; // UBMAD_CLOSE_REQ is unreliable, msn does not work + + sge.addr = sge_addr; + sge.len = (uint32_t)sizeof(struct ubmad_msg); + sge.tseg = rsrc->send_seg; + jfs_wr.opcode = UBCORE_OPC_SEND; + jfs_wr.tjetty = tjetty; + jfs_wr.send.src.sge = &sge; + jfs_wr.send.src.num_sge = 1; + + ret = ubcore_post_jetty_send_wr(rsrc->jetty, &jfs_wr, &jfs_bad_wr); + if (ret != 0) { + atomic_fetch_sub(1, &rsrc->tx_in_queue); + ubcm_log_warn( + "Failed to send close request, ret: %d, jetty_id: %u.\n", + ret, rsrc->jetty->jetty_id.id); + } +} + +/* + * 1. fill up jfr in ubmad_open_device() for first post_send of each jetty0 pair. + * 2. supplement one consumed wqe to jfr after poll jfc_r in ubmad_jfce_handler_r(). + * 3. this function is private and in ubmad range. + */ +int ubmad_post_recv(struct ubmad_jetty_resource *rsrc) +{ + uint32_t sge_idx; + uint64_t sge_addr; + struct ubcore_sge sge = { 0 }; + struct ubcore_jfr_wr jfr_wr = { 0 }; + struct ubcore_jfr_wr *jfr_bad_wr = NULL; + int ret; + + sge_idx = ubmad_bitmap_get_id( + rsrc->recv_seg_bitmap); // put in ubmad_recv_work_handler() + if (sge_idx >= rsrc->recv_seg_bitmap->size) { + ubcm_log_err("get sge_idx failed\n"); + return -1; + } + sge_addr = rsrc->recv_seg->seg.ubva.va + UBMAD_SGE_MAX_LEN * sge_idx; + + sge.addr = sge_addr; + sge.len = UBMAD_SGE_MAX_LEN; + sge.tseg = rsrc->recv_seg; + jfr_wr.src.sge = &sge; + jfr_wr.src.num_sge = 1; + jfr_wr.user_ctx = sge_addr; + ret = ubcore_post_jetty_recv_wr(rsrc->jetty, &jfr_wr, &jfr_bad_wr); + if (ret != 0) { + ubcm_log_err("ubcore post recv failed. 
ret %d\n", ret); + return ret; + } + + return 0; +} + +/** poll **/ +/* process msg after recv */ +static int ubmad_cm_process_msg(struct ubcore_cr *cr, + union ubcore_eid *local_eid, + struct ubmad_msg *msg, + struct ubmad_agent_priv *agent_priv) +{ + struct ubmad_recv_cr recv_cr = { 0 }; + + recv_cr.cr = cr; + recv_cr.local_eid = *local_eid; + recv_cr.msg_type = msg->msg_type; + recv_cr.payload = (uint64_t)msg->payload; + recv_cr.payload_len = msg->payload_len; + + if (agent_priv->agent.recv_handler != NULL && + agent_priv->agent.recv_handler(&agent_priv->agent, &recv_cr) != 0) { + ubcm_log_err("recv_handler exec failed\n"); + return -1; + } + + return 0; +} + +/* Return value: true - msn is valid and message processed; */ +/* false - msn is invalid and message dropped */ +bool ubmad_process_rx_msn(struct ubmad_bitmap *rx_bitmap, uint64_t msn) +{ + bool result; + uint32_t i; + + if (rx_bitmap->right_end >= UBMAD_RX_BITMAP_SIZE && + msn <= rx_bitmap->right_end - UBMAD_RX_BITMAP_SIZE) + return false; + + if (msn <= rx_bitmap->right_end) { + result = ubmad_bitmap_test_id( + rx_bitmap, (uint32_t)(msn % UBMAD_RX_BITMAP_SIZE)); + } else { + for (i = rx_bitmap->right_end + 1; i < msn; i++) + (void)ubmad_bitmap_put_id(rx_bitmap, + i % UBMAD_RX_BITMAP_SIZE); + rx_bitmap->right_end = msn; + ubmad_bitmap_set_id(rx_bitmap, msn); + result = true; + } + + return result; +} + +static int ubmad_process_conn_data(struct ubcore_cr *cr, + struct ubmad_jetty_resource *rsrc, + struct ubmad_device_priv *dev_priv, + struct ubmad_agent_priv *agent_priv) +{ + struct ubmad_msg *msg = (struct ubmad_msg *)cr->user_ctx; + union ubcore_eid *seid = &cr->remote_id.eid; + struct ubmad_seid_node *seid_node; + int ret = 0; + + // get seid_node + seid_node = ubmad_get_seid_node(seid, rsrc); // put below + if (IS_ERR_OR_NULL(seid_node)) { + // destroy in ubmad_uninit_seid_hlist(). No need to destroy even err below. + seid_node = ubmad_create_seid_node(seid, rsrc); + if (IS_ERR_OR_NULL(seid_node)) { + ubcm_log_err( + "create seid_node failed for first msg. msn %llu seid " EID_FMT + "\n", + msg->msn, EID_ARGS(*seid)); + return -1; + } + } + + ubcm_log_info( + "Finish to recv request. msn %llu right_end %llu, seid " EID_FMT + "\n", + msg->msn, seid_node->rx_bitmap->right_end, EID_ARGS(*seid)); + + ret = ubmad_cm_process_msg(cr, &rsrc->jetty->jetty_id.eid, msg, + agent_priv); + if (ret != 0) + ubcm_log_err("cm process msg failed. msn %llu, seid " EID_FMT + "\n", + msg->msn, EID_ARGS(*seid)); + + ubmad_put_seid_node(seid_node); + return ret; +} + +static void ubmad_process_conn_ack(struct ubcore_cr *cr, + struct ubmad_jetty_resource *rsrc, + struct ubmad_device_priv *dev_priv, + struct ubmad_agent_priv *agent_priv) +{ + struct ubmad_msg *msg = (struct ubmad_msg *)cr->user_ctx; + union ubcore_eid *seid = &cr->remote_id.eid; + struct ubmad_tjetty *tjetty; + + struct ubmad_msn_mgr *msn_mgr; + unsigned long flag; + struct ubmad_msn_node *cur; + struct hlist_node *next; + uint32_t hash = + jhash(&msg->msn, sizeof(uint64_t), 0) % UBMAD_MSN_HLIST_SIZE; + + tjetty = ubmad_get_tjetty(seid, rsrc); // put below + if (IS_ERR_OR_NULL(tjetty)) { + ubcm_log_err("get tjetty failed. 
eid " EID_FMT "\n", + EID_ARGS(*seid)); + return; + } + + // try to remove msn_node from msn_hlist + msn_mgr = &tjetty->msn_mgr; + spin_lock_irqsave(&msn_mgr->msn_hlist_lock, flag); + hlist_for_each_entry_safe(cur, next, &msn_mgr->msn_hlist[hash], node) { + if (cur->msn == msg->msn) { + hlist_del(&cur->node); + kfree(cur); + spin_unlock_irqrestore(&msn_mgr->msn_hlist_lock, flag); + goto put_tjetty; + } + } + spin_unlock_irqrestore(&msn_mgr->msn_hlist_lock, flag); + // msn_node not in msn_hlist, indicates already removed by previous ack with same msn + ubcm_log_info("redundant ack. msn %llu seid " EID_FMT "\n", msg->msn, + EID_ARGS(*seid)); + +put_tjetty: + ubmad_put_tjetty(tjetty); + ubcm_log_info("recv conn ack. msn %llu seid " EID_FMT "\n", msg->msn, + EID_ARGS(*seid)); +} + +static int ubmad_process_authn_data(struct ubcore_cr *cr, + struct ubmad_jetty_resource *rsrc, + struct ubmad_agent_priv *agent_priv) +{ + struct ubmad_msg *msg = (struct ubmad_msg *)cr->user_ctx; + union ubcore_eid *seid = &cr->remote_id.eid; + int ret; + + ret = ubmad_cm_process_msg(cr, &rsrc->jetty->jetty_id.eid, msg, + agent_priv); + if (ret != 0) + ubcm_log_err("cm process msg failed. msn %llu, seid " EID_FMT + "\n", + msg->msn, EID_ARGS(*seid)); + + return ret; +} + +static inline void ubmad_process_close_req(struct ubcore_cr *cr, + struct ubmad_jetty_resource *rsrc) +{ + ubmad_remove_tjetty(&cr->remote_id.eid, rsrc); + ubmad_delete_seid_node(&cr->remote_id.eid, rsrc); + + ubcm_log_info("Finish to process close request, remote eid: " EID_FMT + ", remote id: %u.\n", + EID_ARGS(cr->remote_id.eid), cr->remote_id.id); +} + +static int ubmad_process_msg(struct ubcore_cr *cr, + struct ubmad_jetty_resource *rsrc, + struct ubmad_device_priv *dev_priv, + struct ubmad_agent_priv *agent_priv) +{ + struct ubmad_msg *msg = (struct ubmad_msg *)cr->user_ctx; + int ret = 0; + + if (cr->completion_len < sizeof(struct ubmad_msg)) { + ubcm_log_err( + "even header is incomplete. completion_len %u < header size %lu\n", + cr->completion_len, sizeof(struct ubmad_msg)); + return -EINVAL; + } + if (cr->completion_len != sizeof(struct ubmad_msg) + msg->payload_len) { + ubcm_log_err( + "completion_len not right. 
completion_len %u != header %lu + payload len %u\n", + cr->completion_len, sizeof(struct ubmad_msg), + msg->payload_len); + return -EINVAL; + } + + switch (msg->msg_type) { + case UBMAD_CONN_DATA: + case UBMAD_UBC_CONN_DATA: + ret = ubmad_process_conn_data(cr, rsrc, dev_priv, agent_priv); + break; + case UBMAD_CONN_ACK: + case UBMAD_UBC_CONN_ACK: + ubmad_process_conn_ack(cr, rsrc, dev_priv, agent_priv); + break; + case UBMAD_AUTHN_DATA: + ret = ubmad_process_authn_data(cr, rsrc, agent_priv); + break; + case UBMAD_CLOSE_REQ: + ubmad_process_close_req(cr, rsrc); + break; + default: + ubcm_log_err("Invalid msg_type: %u.\n", msg->msg_type); + ret = -EINVAL; + } + + return ret; +} + +/* send_ops for ubcore connection manager */ +// for UBMAD_UBC_CONN_DATA +int ubmad_ubc_send(struct ubcore_device *device, + struct ubcore_cm_send_buf *send_buf) +{ + struct ubmad_send_buf *bad_send_buf = NULL; + struct ubmad_device_priv *dev_priv; + int ret; + + if (device == NULL || send_buf == NULL) { + ubcm_log_err("Invalid parameter.\n"); + return -EINVAL; + } + if (send_buf->msg_type != UBCORE_CM_CONN_MSG) { + ubcm_log_err("Invalid message type: %u.\n", send_buf->msg_type); + return -EINVAL; + } + + dev_priv = ubmad_get_device_priv(device); + if (IS_ERR_OR_NULL(dev_priv)) { + ubcm_log_err("Failed to get dev_priv, dev_name: %s\n", + device->dev_name); + return -1; + } + + send_buf->src_eid = dev_priv->eid_info.eid; + ubmad_put_device_priv(dev_priv); + + ubcm_log_info("ubc dev: %s, s_eid: " EID_FMT ", d_eid: " EID_FMT " ", + device->dev_name, EID_ARGS(send_buf->src_eid), + EID_ARGS(send_buf->dst_eid)); + + ret = ubmad_post_send(device, (struct ubmad_send_buf *)send_buf, + &bad_send_buf); + if (ret != 0) + ubcm_log_err("Failed to send message, ret: %d, length: %u.\n", + ret, send_buf->payload_len); + + return ret; +} + +/* jfce work handler */ +// polling here only indicates if send successfully +static void ubmad_send_work_handler(struct ubmad_device_priv *dev_priv, + struct ubmad_jfce_work *jfce_work) +{ + struct ubmad_jetty_resource *rsrc; + struct ubmad_msg *msg; + uint32_t sge_idx; + int ret; + int cr_cnt; + struct ubcore_cr cr = {0}; + struct ubmad_agent_priv *agent_priv = jfce_work->agent_priv; + struct ubcore_jfc *jfc = jfce_work->jfc; + struct ubmad_send_cr send_cr = {0}; + + cr_cnt = 0; + + rsrc = ubmad_get_jetty_rsrc_by_jfc_s(dev_priv, jfc); + if (IS_ERR_OR_NULL(rsrc)) { + ubcm_log_err("Failed to match jfc for send.\n"); + return; + } + + do { + cr_cnt = ubcore_poll_jfc(jfc, 1, &cr); + if (cr_cnt < 0) { + ubcm_log_err("cr_cnt %d < 0\n", cr_cnt); + break; + } + if (cr_cnt == 0) + break; + + /* cr_cnt == 1 */ + atomic_dec(&rsrc->tx_in_queue); + if (cr.status == UBCORE_CR_SUCCESS) { + send_cr.cr = &cr; + if (agent_priv->agent.send_handler != NULL && + agent_priv->agent.send_handler(&agent_priv->agent, + &send_cr) != 0) + ubcm_log_err("send handler failed. cr_cnt %d\n", + cr_cnt); + } + + // put ack msg sge id + if (cr.user_ctx < rsrc->send_seg->seg.ubva.va) { + ubcm_log_err( + "invalid cr.user_ctx. sge addr should not < seg addr\n"); + } else { + msg = (struct ubmad_msg *)cr.user_ctx; + sge_idx = (cr.user_ctx - rsrc->send_seg->seg.ubva.va) / + UBMAD_SGE_MAX_LEN; + ubmad_bitmap_put_id( + rsrc->send_seg_bitmap, + sge_idx); // get in ubmad_do_post_send() + } + if (cr.status != UBCORE_CR_SUCCESS) { + ubcm_log_err( + "Tx status error. 
cr_cnt %d, status %d, comp_len %u, user_ctx: 0x%llx.\n", + cr_cnt, cr.status, cr.completion_len, + cr.user_ctx); + break; + } + } while (cr_cnt > 0); + + ret = ubcore_rearm_jfc(jfc, false); + ubcm_log_info("Rearm send jfc, jfc_id: %u, ret: %d.\n", jfc->id, ret); +} + +// polling here indicates if recv msg +static void ubmad_recv_work_handler(struct ubmad_device_priv *dev_priv, + struct ubmad_jfce_work *jfce_work) +{ + struct ubcore_jfc *jfc = jfce_work->jfc; + struct ubmad_jetty_resource *rsrc; + struct ubcore_cr cr = {0}; + uint32_t sge_idx; + int ret; + int cr_cnt; + + cr_cnt = 0; + + rsrc = ubmad_get_jetty_rsrc_by_jfc_r(dev_priv, jfc); + if (IS_ERR_OR_NULL(rsrc)) { + ubcm_log_err("Failed to match jfc for recv.\n"); + return; + } + + do { + cr_cnt = ubcore_poll_jfc(jfc, 1, &cr); + if (cr_cnt < 0) { + ubcm_log_err("cr_cnt %d < 0\n", cr_cnt); + break; + } + if (cr_cnt == 0) + break; + + /* cr_cnt == 1 */ + if (cr.status == UBCORE_CR_SUCCESS) { + if (ubmad_process_msg(&cr, rsrc, dev_priv, + jfce_work->agent_priv) != 0) + ubcm_log_err("process msg failed\n"); + } + + // put sge id + if (cr.user_ctx < rsrc->recv_seg->seg.ubva.va) { + ubcm_log_err( + "invalid cr.user_ctx. sge addr should not < seg addr\n"); + } else { + sge_idx = (cr.user_ctx - rsrc->recv_seg->seg.ubva.va) / + UBMAD_SGE_MAX_LEN; + // get in ubmad_post_recv() + ubmad_bitmap_put_id(rsrc->recv_seg_bitmap, sge_idx); + } + + // supplement one consumed wqe + if (ubmad_post_recv(rsrc) != 0) + ubcm_log_err("post recv in jfce handler failed.\n"); + + if (cr.status != UBCORE_CR_SUCCESS) { + ubcm_log_err( + "Rx status error. cr_cnt %d, status %d, comp_len %u, user_ctx: 0x%llx.\n", + cr_cnt, cr.status, cr.completion_len, + cr.user_ctx); + break; + } + } while (cr_cnt > 0); + + ret = ubcore_rearm_jfc(jfc, false); + ubcm_log_info("Rearm recv jfc, jfc_id: %u, ret: %d.\n", jfc->id, ret); +} + +// continue from ubmad_jfce_handler() +static void ubmad_jfce_work_handler(struct work_struct *work) +{ + struct ubmad_jfce_work *jfce_work = + container_of(work, struct ubmad_jfce_work, work); + struct ubcore_device *dev = jfce_work->jfc->ub_dev; + struct ubmad_device_priv *dev_priv = NULL; + + dev_priv = ubmad_get_device_priv(dev); // put below + if (IS_ERR_OR_NULL(dev_priv)) { + ubcm_log_err("fail to get dev_priv, dev_name: %s.\n", + dev->dev_name); + goto put_agent_priv; + } + if (!dev_priv->valid) { + ubcm_log_err_rl("dev_priv rsrc not inited. 
dev_name: %s.\n", + dev->dev_name); + goto put_device_priv; + } + + switch (jfce_work->type) { + case UBMAD_SEND_WORK: + ubmad_send_work_handler(dev_priv, jfce_work); + break; + case UBMAD_RECV_WORK: + ubmad_recv_work_handler(dev_priv, jfce_work); + break; + default: + ubcm_log_err("unknown work type %d\n", jfce_work->type); + } + +put_device_priv: + ubmad_put_device_priv(dev_priv); // get above +put_agent_priv: + ubmad_put_agent_priv( + jfce_work->agent_priv); // get in ubmad_jfce_handler() + kfree(jfce_work); // alloc in ubmad_jfce_handler() +} + +/* jfce handler */ +// see ubmad_jfce_work_handler() then +static void ubmad_jfce_handler(struct ubcore_jfc *jfc, + enum ubmad_jfce_work_type type) +{ + struct ubmad_agent_priv *agent_priv = NULL; + struct ubmad_jfce_work *jfce_work; + int ret; + + agent_priv = ubmad_get_agent_priv( + jfc->ub_dev); // put in ubmad_jfce_work_handler() + if (IS_ERR_OR_NULL(agent_priv)) { + ubcm_log_err("Failed to get agent_priv, dev_name: %s.\n", + jfc->ub_dev->dev_name); + return; + } + ubcm_log_info("Start to handle jfce, type: %d, jfc_id: %u.\n", type, jfc->id); + + // free in ubmad_jfce_work_handler() + jfce_work = kzalloc(sizeof(struct ubmad_jfce_work), GFP_ATOMIC); + if (IS_ERR_OR_NULL(jfce_work)) + goto put_agent_priv; + jfce_work->type = type; + jfce_work->jfc = jfc; + jfce_work->agent_priv = agent_priv; + + INIT_WORK(&jfce_work->work, ubmad_jfce_work_handler); + ret = queue_work(agent_priv->jfce_wq, &jfce_work->work); + if (!ret) { + ubcm_log_err("queue work failed. ret %d\n", ret); + goto free_work; + } + return; + +free_work: + kfree(jfce_work); +put_agent_priv: + ubmad_put_agent_priv(agent_priv); +} + +void ubmad_jfce_handler_s(struct ubcore_jfc *jfc) +{ + ubmad_jfce_handler(jfc, UBMAD_SEND_WORK); +} + +void ubmad_jfce_handler_r(struct ubcore_jfc *jfc) +{ + ubmad_jfce_handler(jfc, UBMAD_RECV_WORK); +} diff --git a/drivers/ub/urma/ubcore/ubcore_cdev_file.c b/drivers/ub/urma/ubcore/ubcore_cdev_file.c new file mode 100644 index 000000000000..fad842d39653 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_cdev_file.c @@ -0,0 +1,1274 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore cdev file + * Author: Qian Guoxin + * Create: 2024-02-05 + * Note: + * History: 2024-02-05: Create file + */ + +#include +#include +#include + +#include +#include + +#include "ubcore_log.h" +#include "ubcore_device.h" +#include "ubcore_cdev_file.h" + +#define UBCORE_MAX_VALUE_LEN 24 +#define UBCORE_READ_ONLY_MODE 0444 +#define UBCORE_RESERVED_JETTY_PARAM_NUM 2 + +/* callback information */ +typedef ssize_t (*ubcore_show_attr_cb)(struct ubcore_device *dev, char *buf); +typedef ssize_t (*ubcore_store_attr_cb)(struct ubcore_device *dev, + const char *buf, size_t len); +typedef ssize_t (*ubcore_show_port_attr_cb)(struct ubcore_device *dev, + char *buf, uint8_t port_id); + +static inline struct ubcore_device * +get_ubcore_device(struct ubcore_logic_device *ldev) +{ + return ldev == NULL ? 
NULL : ldev->ub_dev; +} + +static ssize_t ubcore_show_dev_attr(struct device *dev, + struct device_attribute *attr, char *buf, + ubcore_show_attr_cb show_cb) +{ + struct ubcore_logic_device *ldev = dev_get_drvdata(dev); + struct ubcore_device *ub_dev = get_ubcore_device(ldev); + + if (!ldev || !ub_dev || !buf) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + return show_cb(ub_dev, buf); +} + +static ssize_t ubcore_store_dev_attr(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len, + ubcore_store_attr_cb store_cb) +{ + struct ubcore_logic_device *ldev = dev_get_drvdata(dev); + struct ubcore_device *ub_dev = get_ubcore_device(ldev); + + if (!ldev || !ub_dev || !buf) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + return store_cb(ub_dev, buf, len); +} + +/* interface for exporting device attributes */ +static ssize_t ubdev_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_DEV_NAME, "%s\n", dev->dev_name); +} + +static ssize_t ubdev_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, ubdev_show_cb); +} + +static DEVICE_ATTR_RO(ubdev); + +static ssize_t guid_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf( + buf, (UBCORE_EID_STR_LEN + 1) + 1, EID_FMT "\n", + EID_ARGS( + dev->attr.guid)); // The format of GUID is the same as EID. +} + +static ssize_t guid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, guid_show_cb); +} + +static DEVICE_ATTR_RO(guid); + +static ssize_t max_upi_cnt_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_upi_cnt); +} + +static ssize_t max_upi_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_upi_cnt_show_cb); +} + +static DEVICE_ATTR_RO(max_upi_cnt); + +static ssize_t feature_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "0x%x\n", + dev->attr.dev_cap.feature.value); +} + +static ssize_t feature_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, feature_show_cb); +} + +static DEVICE_ATTR_RO(feature); + +static ssize_t max_jfc_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jfc); +} + +static ssize_t max_jfc_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_jfc_show_cb); +} + +static DEVICE_ATTR_RO(max_jfc); + +static ssize_t max_jfs_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jfs); +} + +static ssize_t max_jfs_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_jfs_show_cb); +} + +static DEVICE_ATTR_RO(max_jfs); + +static ssize_t max_jfr_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jfr); +} + +static ssize_t max_jfr_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_jfr_show_cb); +} + +static DEVICE_ATTR_RO(max_jfr); + +static ssize_t max_jetty_show_cb(struct ubcore_device *dev, char *buf) +{ + return 
snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jetty); +} + +static ssize_t max_jetty_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_jetty_show_cb); +} + +static DEVICE_ATTR_RO(max_jetty); + +static ssize_t show_max_jetty_grp_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jetty_grp); +} +static ssize_t max_jetty_grp_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, show_max_jetty_grp_cb); +} +static DEVICE_ATTR_RO(max_jetty_grp); + +static ssize_t show_max_jetty_in_jetty_grp_cb(struct ubcore_device *dev, + char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jetty_in_jetty_grp); +} +static ssize_t max_jetty_in_jetty_grp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, + show_max_jetty_in_jetty_grp_cb); +} +static DEVICE_ATTR_RO(max_jetty_in_jetty_grp); + +static ssize_t max_jfc_depth_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jfc_depth); +} + +static ssize_t max_jfc_depth_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_jfc_depth_show_cb); +} + +static DEVICE_ATTR_RO(max_jfc_depth); + +static ssize_t max_jfs_depth_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jfs_depth); +} + +static ssize_t max_jfs_depth_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_jfs_depth_show_cb); +} + +static DEVICE_ATTR_RO(max_jfs_depth); + +static ssize_t max_jfr_depth_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jfr_depth); +} + +static ssize_t max_jfr_depth_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_jfr_depth_show_cb); +} + +static DEVICE_ATTR_RO(max_jfr_depth); + +static ssize_t show_max_jfs_inline_size_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jfs_inline_size); +} + +static ssize_t max_jfs_inline_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, + show_max_jfs_inline_size_cb); +} + +static DEVICE_ATTR_RO(max_jfs_inline_size); + +static ssize_t max_jfs_sge_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jfs_sge); +} + +static ssize_t max_jfs_sge_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_jfs_sge_show_cb); +} + +static DEVICE_ATTR_RO(max_jfs_sge); + +static ssize_t max_jfs_rsge_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jfs_rsge); +} + +static ssize_t max_jfs_rsge_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_jfs_rsge_show_cb); +} + +static DEVICE_ATTR_RO(max_jfs_rsge); + +static ssize_t max_jfr_sge_show_cb(struct ubcore_device *dev, char *buf) +{ 
+ return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_jfr_sge); +} + +static ssize_t max_jfr_sge_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_jfr_sge_show_cb); +} + +static DEVICE_ATTR_RO(max_jfr_sge); + +static ssize_t max_msg_size_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%llu\n", + dev->attr.dev_cap.max_msg_size); +} + +static ssize_t max_msg_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_msg_size_show_cb); +} + +static DEVICE_ATTR_RO(max_msg_size); + +static ssize_t show_max_read_size_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_read_size); +} +static ssize_t max_read_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, show_max_read_size_cb); +} +static DEVICE_ATTR_RO(max_read_size); + +static ssize_t show_max_write_size_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_write_size); +} +static ssize_t max_write_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, show_max_write_size_cb); +} +static DEVICE_ATTR_RO(max_write_size); + +static ssize_t show_max_cas_size_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_cas_size); +} +static ssize_t max_cas_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, show_max_cas_size_cb); +} +static DEVICE_ATTR_RO(max_cas_size); + +static ssize_t show_max_swap_size_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_swap_size); +} +static ssize_t max_swap_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, show_max_swap_size_cb); +} +static DEVICE_ATTR_RO(max_swap_size); + +static ssize_t show_max_fetch_and_add_size_cb(struct ubcore_device *dev, + char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_fetch_and_add_size); +} +static ssize_t max_fetch_and_add_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, + show_max_fetch_and_add_size_cb); +} +static DEVICE_ATTR_RO(max_fetch_and_add_size); + +static ssize_t show_max_fetch_and_sub_size_cb(struct ubcore_device *dev, + char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_fetch_and_sub_size); +} +static ssize_t max_fetch_and_sub_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, + show_max_fetch_and_sub_size_cb); +} +static DEVICE_ATTR_RO(max_fetch_and_sub_size); + +static ssize_t show_max_fetch_and_and_size_cb(struct ubcore_device *dev, + char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_fetch_and_and_size); +} +static ssize_t max_fetch_and_and_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, + show_max_fetch_and_and_size_cb); +} +static DEVICE_ATTR_RO(max_fetch_and_and_size); + 
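Each DEVICE_ATTR_RO above exposes one dev_cap field as a read-only sysfs file that prints a single value per read. A minimal userspace sketch of reading one of these attributes follows; the /sys/class/ubcore/<dev_name>/ path is an assumption here (the class and device directories are registered elsewhere in this patch), so substitute the real path:

    /* Hypothetical reader for one of the read-only attributes defined above. */
    #include <stdio.h>

    int main(void)
    {
        char line[64];
        FILE *f = fopen("/sys/class/ubcore/ubdev0/max_jetty", "r"); /* path is an assumption */

        if (f == NULL)
            return 1;
        if (fgets(line, sizeof(line), f) != NULL)
            printf("max_jetty = %s", line); /* show callbacks already append '\n' */
        fclose(f);
        return 0;
    }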
+static ssize_t show_max_fetch_and_or_size_cb(struct ubcore_device *dev, + char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_fetch_and_or_size); +} +static ssize_t max_fetch_and_or_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, + show_max_fetch_and_or_size_cb); +} +static DEVICE_ATTR_RO(max_fetch_and_or_size); + +static ssize_t show_max_fetch_and_xor_size_cb(struct ubcore_device *dev, + char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_fetch_and_xor_size); +} +static ssize_t max_fetch_and_xor_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, + show_max_fetch_and_xor_size_cb); +} +static DEVICE_ATTR_RO(max_fetch_and_xor_size); + +static ssize_t show_atomic_feat_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.atomic_feat.value); +} +static ssize_t atomic_feat_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, show_atomic_feat_cb); +} +static DEVICE_ATTR_RO(atomic_feat); + +static ssize_t max_rc_outstd_cnt_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%llu\n", + dev->attr.dev_cap.max_rc_outstd_cnt); +} + +static ssize_t max_rc_outstd_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_rc_outstd_cnt_show_cb); +} + +static DEVICE_ATTR_RO(max_rc_outstd_cnt); + +static ssize_t trans_mode_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.trans_mode); +} + +static ssize_t trans_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, trans_mode_show_cb); +} + +static DEVICE_ATTR_RO(trans_mode); + +static ssize_t sub_trans_mode_cap_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "0x%hx\n", + dev->attr.dev_cap.sub_trans_mode_cap); +} + +static ssize_t sub_trans_mode_cap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, sub_trans_mode_cap_show_cb); +} + +static DEVICE_ATTR_RO(sub_trans_mode_cap); + +static ssize_t congestion_ctrl_alg_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.congestion_ctrl_alg); +} + +static ssize_t congestion_ctrl_alg_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, + congestion_ctrl_alg_show_cb); +} + +static ssize_t congestion_ctrl_alg_store_cb(struct ubcore_device *dev, + const char *buf, size_t len) +{ + uint16_t value; + int ret; + + ret = kstrtou16(buf, 0, &value); + if (ret != 0) + return -EINVAL; + + dev->attr.dev_cap.congestion_ctrl_alg = value; + return (ssize_t)len; +} + +static ssize_t congestion_ctrl_alg_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + return ubcore_store_dev_attr(dev, attr, buf, len, + congestion_ctrl_alg_store_cb); +} + +static DEVICE_ATTR_RW(congestion_ctrl_alg); // 0644 + +static ssize_t ceq_cnt_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.ceq_cnt); 
+} + +static ssize_t ceq_cnt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, ceq_cnt_show_cb); +} + +static DEVICE_ATTR_RO(ceq_cnt); + +static ssize_t utp_cnt_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_utp_cnt); +} + +static ssize_t utp_cnt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, utp_cnt_show_cb); +} + +static DEVICE_ATTR_RO(utp_cnt); + +static ssize_t max_tp_in_tpg_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_tp_in_tpg); +} + +static ssize_t max_tp_in_tpg_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_tp_in_tpg_show_cb); +} + +static DEVICE_ATTR_RO(max_tp_in_tpg); + +static ssize_t max_oor_cnt_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_oor_cnt); +} + +static ssize_t max_oor_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_oor_cnt_show_cb); +} + +static DEVICE_ATTR_RO(max_oor_cnt); + +static ssize_t mn_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.mn); +} + +static ssize_t mn_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, mn_show_cb); +} + +static DEVICE_ATTR_RO(mn); + +static ssize_t max_netaddr_cnt_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_netaddr_cnt); +} + +static ssize_t max_netaddr_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_netaddr_cnt_show_cb); +} + +static DEVICE_ATTR_RO(max_netaddr_cnt); + +static ssize_t port_count_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", dev->attr.port_cnt); +} + +static ssize_t port_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, port_count_show_cb); +} + +static DEVICE_ATTR_RO(port_count); + +static ssize_t virtualization_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%s\n", + dev->attr.virtualization ? "true" : "false"); +} +static ssize_t virtualization_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, virtualization_show_cb); +} +static DEVICE_ATTR_RO(virtualization); + +static ssize_t tp_maintainer_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%s\n", + dev->attr.tp_maintainer ? 
"true" : "false"); +} +static ssize_t tp_maintainer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, tp_maintainer_show_cb); +} +static DEVICE_ATTR_RO(tp_maintainer); + +static ssize_t show_ue_cnt_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_ue_cnt); +} +static ssize_t ue_cnt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, show_ue_cnt_cb); +} +static DEVICE_ATTR_RO(ue_cnt); + +static ssize_t show_page_size_cap_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "0x%llx\n", + dev->attr.dev_cap.page_size_cap); +} + +static ssize_t page_size_cap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, show_page_size_cap_cb); +} +static DEVICE_ATTR_RO(page_size_cap); + +static ssize_t show_dynamic_eid_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%d\n", dev->dynamic_eid); +} +static ssize_t dynamic_eid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, show_dynamic_eid_cb); +} +static DEVICE_ATTR_RO(dynamic_eid); + +static ssize_t max_eid_cnt_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + dev->attr.dev_cap.max_eid_cnt); +} + +static ssize_t max_eid_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, max_eid_cnt_show_cb); +} + +static DEVICE_ATTR_RO(max_eid_cnt); + +static ssize_t transport_type_show_cb(struct ubcore_device *dev, char *buf) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%d\n", + (int)dev->transport_type); +} + +static ssize_t transport_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, transport_type_show_cb); +} + +static DEVICE_ATTR_RO(transport_type); + +static ssize_t driver_name_show_cb(struct ubcore_device *dev, char *buf) +{ + if (dev->ops == NULL) + return -EINVAL; + + return snprintf(buf, UBCORE_MAX_DRIVER_NAME, "%s\n", + dev->ops->driver_name); +} + +static ssize_t driver_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, driver_name_show_cb); +} + +static DEVICE_ATTR_RO(driver_name); + +static ssize_t reserved_jetty_id_show_cb(struct ubcore_device *dev, char *buf) +{ + int ret = 0; + + ret = ubcore_query_device_attr(dev, &dev->attr); + if (ret != 0) { + ubcore_log_err("failed query device attr.\n"); + return ret; + } + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u-%u\n", + dev->attr.reserved_jetty_id_min, + dev->attr.reserved_jetty_id_max); +} + +static ssize_t reserved_jetty_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, reserved_jetty_id_show_cb); +} + +static ssize_t reserved_jetty_id_store_cb(struct ubcore_device *dev, + const char *buf, size_t len) +{ + uint32_t min_jetty_id = 0; + uint32_t max_jetty_id = 0; + int ret; + + ret = sscanf(buf, "%u-%u", &min_jetty_id, &max_jetty_id); + if (ret != UBCORE_RESERVED_JETTY_PARAM_NUM) + return -EINVAL; + + if (max_jetty_id < min_jetty_id) { + ubcore_log_err( + "Invalid param min jetty id:%u, max jetty id:%u\n", + min_jetty_id, max_jetty_id); + return -EINVAL; + } + 
+ ret = ubcore_config_rsvd_jetty(dev, min_jetty_id, max_jetty_id); + if (ret != 0) { + ubcore_log_err("dev:%s, cfg min jetty id:%u, max jetty id:%u\n", + dev->dev_name, min_jetty_id, max_jetty_id); + return -EINVAL; + } + + return (ssize_t)len; +} + +static ssize_t reserved_jetty_id_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + return ubcore_store_dev_attr(dev, attr, buf, len, + reserved_jetty_id_store_cb); +} + +static DEVICE_ATTR_RW(reserved_jetty_id); // 0644 + +static ssize_t net_dev_show_cb(struct ubcore_device *dev, char *buf) +{ + struct net_device *net_dev; + int ret = 0; + + if (!dev->netdev) + return 0; + + rcu_read_lock(); + net_dev = rcu_dereference(dev->netdev); + if (net_dev) + ret = snprintf(buf, IFNAMSIZ, "%s\n", net_dev->name); + rcu_read_unlock(); + + return ret; +} + +static ssize_t net_dev_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return ubcore_show_dev_attr(dev, attr, buf, net_dev_show_cb); +} + +static DEVICE_ATTR_RO(net_dev); + +/* One eid line has up to 51 bytes with the format: + * "4294967295 0000:0000:0000:0000:0000:ffff:7f00:0001\n" + * sysfs buf size is PAGESIZE, up to 80 eid lines are supported in the sysfs + */ +#define UBCORE_MAX_EID_LINE 51 + +static ssize_t eid_show_cb(struct ubcore_device *dev, char *buf, + struct net *net, uint32_t eid_idx) +{ + union ubcore_eid invalid_eid = { 0 }; + struct ubcore_eid_entry *e; + ssize_t len = 0; + + spin_lock(&dev->eid_table.lock); + if (dev->eid_table.eid_entries == NULL || + eid_idx >= dev->eid_table.eid_cnt) { + spin_unlock(&dev->eid_table.lock); + return -EINVAL; + } + + e = &dev->eid_table.eid_entries[eid_idx]; + if (!e->valid || !net_eq(e->net, net)) + len = snprintf(buf, UBCORE_MAX_EID_LINE, "" EID_FMT "\n", + EID_ARGS(invalid_eid)); + else + len = snprintf(buf, UBCORE_MAX_EID_LINE, "" EID_FMT "\n", + EID_ARGS(e->eid)); + + spin_unlock(&dev->eid_table.lock); + return len; +} + +static ssize_t eid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct ubcore_eid_attr *eid_attr = + container_of(attr, struct ubcore_eid_attr, attr); + struct ubcore_logic_device *ldev = dev_get_drvdata(dev); + struct ubcore_device *ub_dev = get_ubcore_device(ldev); + + if (!ldev || !ub_dev || !buf) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + return eid_show_cb(ub_dev, buf, read_pnet(&ldev->net), + eid_attr->eid_idx); +} + +static struct attribute *ubcore_dev_attrs[] = { + &dev_attr_ubdev.attr, + &dev_attr_guid.attr, + &dev_attr_max_upi_cnt.attr, + &dev_attr_feature.attr, + &dev_attr_max_jfc.attr, + &dev_attr_max_jfs.attr, + &dev_attr_max_jfr.attr, + &dev_attr_max_jetty.attr, + &dev_attr_max_jetty_grp.attr, + &dev_attr_max_jetty_in_jetty_grp.attr, + &dev_attr_max_jfc_depth.attr, + &dev_attr_max_jfs_depth.attr, + &dev_attr_max_jfr_depth.attr, + &dev_attr_max_jfs_inline_size.attr, + &dev_attr_max_jfs_sge.attr, + &dev_attr_max_jfs_rsge.attr, + &dev_attr_max_jfr_sge.attr, + &dev_attr_max_msg_size.attr, + &dev_attr_max_read_size.attr, + &dev_attr_max_write_size.attr, + &dev_attr_max_cas_size.attr, + &dev_attr_max_swap_size.attr, + &dev_attr_max_fetch_and_add_size.attr, + &dev_attr_max_fetch_and_sub_size.attr, + &dev_attr_max_fetch_and_and_size.attr, + &dev_attr_max_fetch_and_or_size.attr, + &dev_attr_max_fetch_and_xor_size.attr, + &dev_attr_atomic_feat.attr, + &dev_attr_max_rc_outstd_cnt.attr, + &dev_attr_trans_mode.attr, + &dev_attr_sub_trans_mode_cap.attr, + &dev_attr_congestion_ctrl_alg.attr, + 
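+ /* congestion_ctrl_alg above is RW (0644): writes are parsed with kstrtou16() and stored directly into dev->attr.dev_cap.congestion_ctrl_alg */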
&dev_attr_ceq_cnt.attr, + &dev_attr_utp_cnt.attr, + &dev_attr_max_tp_in_tpg.attr, + &dev_attr_max_oor_cnt.attr, + &dev_attr_mn.attr, + &dev_attr_max_netaddr_cnt.attr, + &dev_attr_port_count.attr, + &dev_attr_ue_cnt.attr, + &dev_attr_page_size_cap.attr, + &dev_attr_max_eid_cnt.attr, + &dev_attr_dynamic_eid.attr, + &dev_attr_virtualization.attr, + &dev_attr_tp_maintainer.attr, + &dev_attr_transport_type.attr, + &dev_attr_driver_name.attr, + &dev_attr_reserved_jetty_id.attr, + &dev_attr_net_dev.attr, + NULL, +}; + +static const struct attribute_group ubcore_dev_attr_group = { + .attrs = ubcore_dev_attrs, +}; + +static ssize_t ubcore_show_port_attr(struct ubcore_port_kobj *p, + struct ubcore_port_attribute *attr, + char *buf, + ubcore_show_port_attr_cb show_cb) +{ + struct ubcore_device *dev = p->dev; + + if (!dev || !buf) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + return show_cb(dev, buf, p->port_id); +} + +static ssize_t max_mtu_show_cb(struct ubcore_device *dev, char *buf, + uint8_t port_id) +{ + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%d\n", + (int)dev->attr.port_attr[port_id].max_mtu); +} + +static ssize_t max_mtu_show(struct ubcore_port_kobj *p, + struct ubcore_port_attribute *attr, char *buf) +{ + return ubcore_show_port_attr(p, attr, buf, max_mtu_show_cb); +} + +static PORT_ATTR_RO(max_mtu); + +static ssize_t state_show_cb(struct ubcore_device *dev, char *buf, + uint8_t port_id) +{ + struct ubcore_device_status status; + + if (ubcore_query_device_status(dev, &status) != 0) { + ubcore_log_err("query device status for state failed.\n"); + return -EPERM; + } + + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + (uint32_t)status.port_status[port_id].state); +} + +static ssize_t state_show(struct ubcore_port_kobj *p, + struct ubcore_port_attribute *attr, char *buf) +{ + return ubcore_show_port_attr(p, attr, buf, state_show_cb); +} + +static PORT_ATTR_RO(state); + +static ssize_t active_speed_show_cb(struct ubcore_device *dev, char *buf, + uint8_t port_id) +{ + struct ubcore_device_status status; + + if (ubcore_query_device_status(dev, &status) != 0) { + ubcore_log_err( + "query device status for active speed failed.\n"); + return -EPERM; + } + + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + status.port_status[port_id].active_speed); +} + +static ssize_t active_speed_show(struct ubcore_port_kobj *p, + struct ubcore_port_attribute *attr, char *buf) +{ + return ubcore_show_port_attr(p, attr, buf, active_speed_show_cb); +} + +static PORT_ATTR_RO(active_speed); + +static ssize_t active_width_show_cb(struct ubcore_device *dev, char *buf, + uint8_t port_id) +{ + struct ubcore_device_status status; + + if (ubcore_query_device_status(dev, &status) != 0) { + ubcore_log_err( + "query device status for active width failed.\n"); + return -EPERM; + } + + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + status.port_status[port_id].active_width); +} + +static ssize_t active_width_show(struct ubcore_port_kobj *p, + struct ubcore_port_attribute *attr, char *buf) +{ + return ubcore_show_port_attr(p, attr, buf, active_width_show_cb); +} + +static PORT_ATTR_RO(active_width); + +static ssize_t active_mtu_show_cb(struct ubcore_device *dev, char *buf, + uint8_t port_id) +{ + struct ubcore_device_status status; + + if (ubcore_query_device_status(dev, &status) != 0) { + ubcore_log_err("query device status for active mtu failed.\n"); + return -EPERM; + } + + return snprintf(buf, UBCORE_MAX_VALUE_LEN, "%u\n", + (uint32_t)status.port_status[port_id].active_mtu); +} + +static 
ssize_t active_mtu_show(struct ubcore_port_kobj *p, + struct ubcore_port_attribute *attr, char *buf) +{ + return ubcore_show_port_attr(p, attr, buf, active_mtu_show_cb); +} + +static PORT_ATTR_RO(active_mtu); + +static struct attribute *ubcore_port_attrs[] = { + &port_attr_max_mtu.attr, &port_attr_state.attr, + &port_attr_active_speed.attr, &port_attr_active_width.attr, + &port_attr_active_mtu.attr, NULL, +}; + +static ssize_t ubcore_port_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct ubcore_port_attribute *port_attr = + container_of(attr, struct ubcore_port_attribute, attr); + struct ubcore_port_kobj *p = + container_of(kobj, struct ubcore_port_kobj, kobj); + + if (!port_attr->show) + return -EIO; + + return port_attr->show(p, port_attr, buf); +} + +static ssize_t ubcore_port_attr_store(struct kobject *kobj, + struct attribute *attr, const char *buf, + size_t count) +{ + struct ubcore_port_attribute *port_attr = + container_of(attr, struct ubcore_port_attribute, attr); + struct ubcore_port_kobj *p = + container_of(kobj, struct ubcore_port_kobj, kobj); + + if (!port_attr->store) + return -EIO; + + return port_attr->store(p, port_attr, buf, count); +} + +static const struct sysfs_ops ubcore_port_sysfs_ops = { + .show = ubcore_port_attr_show, + .store = ubcore_port_attr_store +}; + +static void ubcore_port_release(struct kobject *kobj) +{ +} + +// ATTRIBUTE_GROUPS defined in 3.11, but must be consistent with kobj_type->default_groups +ATTRIBUTE_GROUPS(ubcore_port); + +static const struct kobj_type ubcore_port_type = { + .release = ubcore_port_release, + .sysfs_ops = &ubcore_port_sysfs_ops, + .default_groups = ubcore_port_groups + +}; + +int ubcore_create_port_attr_files(struct ubcore_logic_device *ldev, + struct ubcore_device *dev, uint8_t port_id) +{ + struct ubcore_port_kobj *p; + + p = &ldev->port[port_id]; + p->dev = dev; + p->port_id = port_id; + + return kobject_init_and_add(&p->kobj, &ubcore_port_type, + &ldev->dev->kobj, "port%hhu", port_id); +} + +static struct attribute_group * +ubcore_alloc_eids_group(struct ubcore_logic_device *ldev) +{ + struct ubcore_eid_attr *eid_attr_list = NULL; + struct attribute_group *attr_grp = NULL; + struct attribute **attr_list = NULL; + uint32_t eid_cnt = 0; + uint32_t i; + + eid_cnt = ldev->ub_dev->eid_table.eid_cnt; + if (eid_cnt == 0 || eid_cnt > UBCORE_MAX_EID_CNT) + return NULL; + + attr_grp = kcalloc(1, sizeof(*attr_grp), GFP_KERNEL); + if (!attr_grp) + return NULL; + + attr_list = kcalloc(eid_cnt + 1, sizeof(*attr_list), GFP_KERNEL); + if (!attr_list) + goto free_grp; + + eid_attr_list = + kcalloc(eid_cnt, sizeof(struct ubcore_eid_attr), GFP_KERNEL); + if (!eid_attr_list) + goto free_attr_list; + + // create /sys/class/dev_name/eids/eid_x + for (i = 0; i < eid_cnt; i++) { + struct ubcore_eid_attr *eid_attr = &eid_attr_list[i]; + + if (snprintf(eid_attr->name, UBCORE_EID_GROUP_NAME_LEN - 1, + "eid%d", i) < 0) + goto free_eid_attr; + + sysfs_attr_init(&eid_attr->attr.attr); + eid_attr->attr.attr.name = eid_attr->name; + eid_attr->attr.attr.mode = UBCORE_READ_ONLY_MODE; + eid_attr->attr.show = eid_show; + eid_attr->eid_idx = i; + attr_list[i] = &eid_attr->attr.attr; + } + + attr_grp->name = "eids"; + attr_grp->attrs = attr_list; + return attr_grp; + +free_eid_attr: + kfree(eid_attr_list); +free_attr_list: + kfree(attr_list); +free_grp: + kfree(attr_grp); + return NULL; +} + +static void ubcore_free_eids_group(struct ubcore_logic_device *ldev) +{ + const struct attribute_group *eid_grp; + struct ubcore_eid_attr 
*eid_attr_list; + struct device_attribute *dev_attr; + + eid_grp = ldev->dev_group[UBCORE_ATTR_GROUP_EIDS]; + if (eid_grp) { + if (eid_grp->attrs && eid_grp->attrs[0]) { + dev_attr = container_of(eid_grp->attrs[0], + struct device_attribute, attr); + eid_attr_list = container_of( + dev_attr, struct ubcore_eid_attr, attr); + kfree(eid_attr_list); + } + + kfree(eid_grp->attrs); + kfree(eid_grp); + + ldev->dev_group[UBCORE_ATTR_GROUP_EIDS] = NULL; + } +} + +int ubcore_create_dev_attr_files(struct ubcore_logic_device *ldev) +{ + int ret; + const struct attribute_group *eid_grp; + + eid_grp = ubcore_alloc_eids_group(ldev); + ldev->dev_group[UBCORE_ATTR_GROUP_DEV_DEFAULT] = &ubcore_dev_attr_group; + ldev->dev_group[UBCORE_ATTR_GROUP_EIDS] = eid_grp; + ldev->dev_group[UBCORE_ATTR_GROUP_NULL] = NULL; + + ret = sysfs_create_groups(&ldev->dev->kobj, ldev->dev_group); + if (ret != 0) { + ubcore_log_err("sysfs create group failed, ret:%d.\n", ret); + ubcore_free_eids_group(ldev); + return -1; + } + + return 0; +} + +void ubcore_remove_port_attr_files(struct ubcore_logic_device *ldev, + uint8_t port_id) +{ + kobject_put(&ldev->port[port_id].kobj); +} + +void ubcore_remove_dev_attr_files(struct ubcore_logic_device *ldev) +{ + sysfs_remove_groups(&ldev->dev->kobj, ldev->dev_group); + ubcore_free_eids_group(ldev); +} + +int ubcore_fill_logic_device_attr(struct ubcore_logic_device *ldev, + struct ubcore_device *dev) +{ + uint8_t p1, p2; /* port */ + + if (ubcore_create_dev_attr_files(ldev) != 0) { + ubcore_log_err("failed to fill attributes, device:%s.\n", + dev->dev_name); + return -EPERM; + } + + /* create /sys/class/ubcore/<dev_name>/port* */ + for (p1 = 0; p1 < dev->attr.port_cnt; p1++) { + if (ubcore_create_port_attr_files(ldev, dev, p1) != 0) + goto err_port_attr; + } + + return 0; + +err_port_attr: + for (p2 = 0; p2 < p1; p2++) + ubcore_remove_port_attr_files(ldev, p2); + + ubcore_remove_dev_attr_files(ldev); + return -EPERM; +} + +void ubcore_unfill_logic_device_attr(struct ubcore_logic_device *ldev, + struct ubcore_device *dev) +{ + uint8_t p; + + for (p = 0; p < dev->attr.port_cnt; p++) + ubcore_remove_port_attr_files(ldev, p); + + ubcore_remove_dev_attr_files(ldev); +} diff --git a/drivers/ub/urma/ubcore/ubcore_cdev_file.h b/drivers/ub/urma/ubcore/ubcore_cdev_file.h new file mode 100644 index 000000000000..b063d795d66c --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_cdev_file.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details.
+ * + * Description: ubcore device file ops file + * Author: Qian Guoxin + * Create: 2024-02-05 + * Note: + * History: 2024-02-05: Create file + */ + +#ifndef UBCORE_CDEV_FILE_H +#define UBCORE_CDEV_FILE_H + +#include + +struct ubcore_port_attribute { + struct attribute attr; + ssize_t (*show)(struct ubcore_port_kobj *p, + struct ubcore_port_attribute *attr, char *buf); + ssize_t (*store)(struct ubcore_port_kobj *p, + struct ubcore_port_attribute *attr, const char *buf, + size_t count); +}; + +#define PORT_ATTR(_name, _mode, _show, _store) \ + struct ubcore_port_attribute port_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) + +#define PORT_ATTR_RO(_name) \ + struct ubcore_port_attribute port_attr_##_name = __ATTR_RO(_name) + +int ubcore_fill_logic_device_attr(struct ubcore_logic_device *ldev, + struct ubcore_device *dev); +void ubcore_unfill_logic_device_attr(struct ubcore_logic_device *ldev, + struct ubcore_device *dev); + +#endif /* UBCORE_CDEV_FILE_H */ diff --git a/drivers/ub/urma/ubcore/ubcore_cgroup.c b/drivers/ub/urma/ubcore/ubcore_cgroup.c new file mode 100644 index 000000000000..ebac148bc8e5 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_cgroup.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore cgroup resource control + * Author: Xu Zhicong + * Create: 2023-12-25 + * Note: + * History: 2023-12-25: create file + */ + +#include "ubcore_log.h" +#include +#include + +#ifdef CONFIG_CGROUP_RDMA +static inline bool ubcore_is_use_cg(struct ubcore_device *dev) +{ + return (dev != NULL && dev->transport_type == UBCORE_TRANSPORT_UB && + dev->cg_device.dev.name != NULL); +} + +void ubcore_cgroup_reg_dev(struct ubcore_device *dev) +{ + if (dev == NULL || strnlen(dev->dev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("Invalid parameter"); + return; + } + + dev->cg_device.dev.name = dev->dev_name; + if (!ubcore_is_use_cg(dev)) + return; + + (void)rdmacg_register_device(&dev->cg_device.dev); +} +EXPORT_SYMBOL(ubcore_cgroup_reg_dev); + +void ubcore_cgroup_unreg_dev(struct ubcore_device *dev) +{ + if (!ubcore_is_use_cg(dev)) + return; + + rdmacg_unregister_device(&dev->cg_device.dev); +} +EXPORT_SYMBOL(ubcore_cgroup_unreg_dev); + +static enum rdmacg_resource_type +ubcore_get_rdma_resource_type(enum ubcore_resource_type type) +{ + switch (type) { + case UBCORE_RESOURCE_HCA_HANDLE: + return RDMACG_RESOURCE_HCA_HANDLE; + case UBCORE_RESOURCE_HCA_OBJECT: + return RDMACG_RESOURCE_HCA_OBJECT; + case UBCORE_RESOURCE_HCA_MAX: + default: + ubcore_log_err("not support cgroup resource type:%d", + (int)type); + } + + return RDMACG_RESOURCE_MAX; +} + +int ubcore_cgroup_try_charge(struct ubcore_cg_object *cg_obj, + struct ubcore_device *dev, + enum ubcore_resource_type type) +{ + enum rdmacg_resource_type rdma_cg_type; + + if (cg_obj == NULL || cg_obj->cg == NULL) + return 0; + + if (!ubcore_is_use_cg(dev)) + return 0; + + rdma_cg_type = ubcore_get_rdma_resource_type(type); + if (rdma_cg_type == RDMACG_RESOURCE_MAX) + return -EINVAL; + + return rdmacg_try_charge(&cg_obj->cg, &dev->cg_device.dev, + rdma_cg_type); +} +EXPORT_SYMBOL(ubcore_cgroup_try_charge); + +void ubcore_cgroup_uncharge(struct ubcore_cg_object *cg_obj, + struct ubcore_device *dev, + enum ubcore_resource_type type) +{ + enum rdmacg_resource_type rdma_cg_type; + + if (cg_obj == NULL || cg_obj->cg == NULL) + return; + + if (!ubcore_is_use_cg(dev)) + return; + + rdma_cg_type = ubcore_get_rdma_resource_type(type); + if (rdma_cg_type == RDMACG_RESOURCE_MAX) + return; + + rdmacg_uncharge(cg_obj->cg, &dev->cg_device.dev, rdma_cg_type); +} +EXPORT_SYMBOL(ubcore_cgroup_uncharge); + +#endif // CONFIG_CGROUP_RDMA diff --git a/drivers/ub/urma/ubcore/ubcore_cmd.h b/drivers/ub/urma/ubcore/ubcore_cmd.h new file mode 100644 index 000000000000..7043c5d20f61 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_cmd.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore cmd header file + * Author: Qian Guoxin + * Create: 2023-2-28 + * Note: + * History: 2023-2-28: Create file + */ + +#ifndef UBCORE_CMD_H +#define UBCORE_CMD_H + +#include +#include +#include "ubcore_log.h" +#include +#include "ubcore_topo_info.h" + +struct ubcore_cmd_hdr { + uint32_t command; + uint32_t args_len; + uint64_t args_addr; +}; + +#define UBCORE_CMD_MAGIC 'C' +#define UBCORE_CMD _IOWR(UBCORE_CMD_MAGIC, 1, struct ubcore_cmd_hdr) +#define UBCORE_MAX_CMD_SIZE 0x4000 + +/* only for ubcore device ioctl */ +enum ubcore_cmd { + UBCORE_CMD_QUERY_STATS = 1, + UBCORE_CMD_QUERY_RES, + UBCORE_CMD_ADD_EID, + UBCORE_CMD_DEL_EID, + UBCORE_CMD_SET_EID_MODE, + UBCORE_CMD_SET_NS_MODE, + UBCORE_CMD_SET_DEV_NS, + UBCORE_CMD_GET_TOPO_INFO, + UBCORE_CMD_SET_GENL_PID, + UBCORE_CMD_UVS_INIT_RES, + /* alpha netlink ops begin: */ + UBCORE_CMD_QUERY_TP_REQ, + UBCORE_CMD_QUERY_TP_RESP, + UBCORE_CMD_RESTORE_TP_REQ, + UBCORE_CMD_RESTORE_TP_RESP, + /* alpha netlink ops end: */ + UBCORE_CMD_UE2MUE_REQ, + UBCORE_CMD_MUE2UE_RESP, + UBCORE_CMD_ADD_SIP_REQ, + UBCORE_CMD_ADD_SIP_RESP, + UBCORE_CMD_DEL_SIP_REQ, + UBCORE_CMD_DEL_SIP_RESP, + UBCORE_CMD_TP_FLUSH_DONE_REQ, + UBCORE_CMD_TP_SUSPEND_REQ, + UBCORE_CMD_MIGRATE_VTP_SWITCH, + UBCORE_CMD_MIGRATE_VTP_ROLLBACK, + UBCORE_CMD_UPDATE_MUE_DEV_INFO_REQ, + UBCORE_CMD_UPDATE_MUE_DEV_INFO_RESP, + UBCORE_CMD_VTP_STATUS_NOTIFY, + UBCORE_CMD_MSG_ACK, + UBCORE_CMD_MAX +}; + +struct ubcore_cmd_query_stats { + struct { + char dev_name[UBCORE_MAX_DEV_NAME]; + uint32_t type; + uint32_t key; + } in; + struct { + uint64_t tx_pkt; + uint64_t rx_pkt; + uint64_t tx_bytes; + uint64_t rx_bytes; + uint64_t tx_pkt_err; + uint64_t rx_pkt_err; + } out; +}; + +struct ubcore_cmd_query_res { + struct { + char dev_name[UBCORE_MAX_DEV_NAME]; + uint32_t type; + uint32_t key; + uint32_t key_ext; + uint32_t key_cnt; + bool query_cnt; + } in; + struct { + uint64_t addr; + uint32_t len; + uint64_t save_ptr; /* save ubcore address for second ioctl */ + } out; +}; + +struct ubcore_cmd_show_utp { + struct { + char dev_name[UBCORE_MAX_DEV_NAME]; + uint32_t utpn; + } in; + struct { + uint64_t addr; + uint32_t len; + } out; +}; + +struct ubcore_cmd_update_ueid { + struct { + char dev_name[UBCORE_MAX_DEV_NAME]; + uint32_t eid_index; + int ns_fd; + } in; +}; + +struct ubcore_cmd_set_eid_mode { + struct { + char dev_name[UBCORE_MAX_DEV_NAME]; + bool eid_mode; + } in; +}; + +struct ubcore_cmd_topo_info { + struct { + int node_idx; + } in; + struct { + uint32_t node_num; + struct ubcore_topo_info topo_info; + } out; +}; + +/* copy from user_space addr to kernel args */ +static inline int ubcore_copy_from_user(void *args, const void *args_addr, + unsigned long args_size) +{ + int ret; + + ret = (int)copy_from_user(args, args_addr, args_size); + if (ret != 0) + ubcore_log_err("copy from user failed, ret:%d.\n", ret); + return ret; +} + +/* copy kernel args to user_space addr */ +static inline int ubcore_copy_to_user(void *args_addr, const void *args, + unsigned long args_size) +{ + int ret; + + ret = (int)copy_to_user(args_addr, args, args_size); + if (ret != 0) + ubcore_log_err("copy to user failed ret:%d.\n", ret); + return ret; +} +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_cmd_tlv.c b/drivers/ub/urma/ubcore/ubcore_cmd_tlv.c new file mode 100644 index 000000000000..ebd7bcfb84e9 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_cmd_tlv.c @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore cmd tlv parse implement + * Author: Chen Yutao + * Create: 2024-08-06 + * Note: + * History: 2024-08-06: create file + */ + +#include "ubcore_cmd.h" +#include "ubcore_log.h" +#include +#include "ubcore_cmd_tlv.h" + +#define UBCORE_CMD_TLV_MAX_LEN \ + (sizeof(struct ubcore_cmd_attr) * UBCORE_CMD_OUT_TYPE_INIT) + +typedef void (*ubcore_fill_spec_func)(void *arg, struct ubcore_cmd_spec *s); + +struct ubcore_tlv_handler { + ubcore_fill_spec_func fill_spec_in; + size_t spec_in_len; + ubcore_fill_spec_func fill_spec_out; + size_t spec_out_len; + ubcore_fill_spec_func fill_spec_in_post; + size_t spec_in_len_post; +}; + +static inline void fill_spec(struct ubcore_cmd_spec *spec, uint16_t type, + uint16_t field_size, uint16_t el_num, + uint16_t el_size, uintptr_t data) +{ + *spec = (struct ubcore_cmd_spec) { + .type = type, + .flag = 0, + .field_size = field_size, + .attr_data.bs = { .el_num = el_num, .el_size = el_size }, + .data = data, + }; +} + +/** + * Fill spec with a field, which is a value or an array taken as a whole. + * @param v Full path of field, e.g. `arg->out.attr.dev_cap.feature` + */ +#define SPEC(spec, type, v) \ + fill_spec(spec, type, sizeof(v), 1, 0, (uintptr_t)(&(v))) + +/** + * Fill spec with a field, which belongs to an array of structs. + * @param v1 Full path of struct array, e.g. `arg->out.attr.port_attr` + * @param v2 Path relative to struct in array, e.g. 
`active_speed` + */ +#define SPEC_ARRAY(spec, type, v1, v2) \ + fill_spec(spec, type, sizeof((v1)->v2), ARRAY_SIZE(v1), \ + sizeof((v1)[0]), (uintptr_t)(&((v1)->v2))) +#define SPEC_ARRAY_DYNAMIC(spec, type, v1, el_num) \ + fill_spec(spec, type, sizeof((v1)[0]), el_num, sizeof((v1)[0]), \ + (uintptr_t)(&((v1)[0]))) + +static void ubcore_set_topo_fill_spec_in(void *arg_addr, + struct ubcore_cmd_spec *spec) +{ + struct ubcore_cmd_set_topo *arg = arg_addr; + struct ubcore_cmd_spec *s = spec; + + SPEC(s++, SET_TOPO_IN_TOPO_INFO, arg->in.topo_info); + SPEC(s++, SET_TOPO_IN_TOPO_NUM, arg->in.topo_num); +} + +static struct ubcore_tlv_handler + g_global_tlv_handler[] = { [0] = { 0 }, + [UBCORE_CMD_SET_TOPO] = { + ubcore_set_topo_fill_spec_in, + SET_TOPO_IN_NUM, + NULL, + 0, + } }; + +static struct ubcore_cmd_attr * +ubcore_create_tlv_attr(struct ubcore_cmd_hdr *hdr, uint32_t *attr_size) +{ + struct ubcore_cmd_attr *attr; + int ret; + + if (hdr->args_len % sizeof(struct ubcore_cmd_attr) != 0 || + hdr->args_len >= UBCORE_CMD_TLV_MAX_LEN) { + ubcore_log_err("Invalid args_len: %u.\n", hdr->args_len); + return NULL; + } + attr = kzalloc(hdr->args_len, GFP_KERNEL); + if (attr == NULL) + return NULL; + + ret = ubcore_copy_from_user( + attr, (void __user *)(uintptr_t)hdr->args_addr, hdr->args_len); + if (ret != 0) { + kfree(attr); + return NULL; + } + *attr_size = hdr->args_len / sizeof(struct ubcore_cmd_attr); + return attr; +} + +static int ubcore_cmd_tlv_parse_type(struct ubcore_cmd_spec *spec, + struct ubcore_cmd_attr *attr) +{ + uintptr_t ptr_src, ptr_dst; + uint32_t i; + int ret; + uint32_t spec_el_num = spec->attr_data.bs.el_num; + uint32_t attr_el_num = attr->attr_data.bs.el_num; + + /* length of ubcore spec and from uvs should be strictly checked */ + /* as length of uvs ioctl attr should be strictly equal to length of ubcore */ + if (spec->field_size != attr->field_size || + spec_el_num != attr_el_num) { + ubcore_log_err( + "Invalid attr, spec/attr, field_size: %u/%u, el_num: %u/%u, type: %u.\n", + spec->field_size, attr->field_size, spec_el_num, + attr_el_num, spec->type); + return -EINVAL; + } + + for (i = 0; i < spec_el_num; i++) { + ptr_dst = (spec->data) + i * spec->attr_data.bs.el_size; + ptr_src = (attr->data) + i * attr->attr_data.bs.el_size; + ret = ubcore_copy_from_user((void *)ptr_dst, + (void __user *)ptr_src, + spec->field_size); + if (ret != 0) + return ret; + } + + return ret; +} + +static int ubcore_cmd_tlv_parse(struct ubcore_cmd_spec *spec, + uint32_t spec_size, + struct ubcore_cmd_attr *attr, + uint32_t attr_size) +{ + uint32_t spec_idx, attr_idx; + bool match; + int ret; + + /* spec type of this range is only in type */ + for (spec_idx = 0; spec_idx < spec_size; spec_idx++) { + match = false; + for (attr_idx = 0; attr_idx < attr_size; attr_idx++) { + if (spec[spec_idx].type == attr[attr_idx].type) { + ret = ubcore_cmd_tlv_parse_type( + &spec[spec_idx], &attr[attr_idx]); + if (ret != 0) + return ret; + match = true; + break; + } + } + if (!match) { + ubcore_log_err( + "Failed to match mandatory in type: %u.\n", + spec[spec_idx].type); + return -1; + } + } + + return 0; +} + +static int ubcore_cmd_tlv_append_type(struct ubcore_cmd_spec *spec, + struct ubcore_cmd_attr *attr) +{ + uintptr_t ptr_src, ptr_dst; + uint32_t i; + int ret; + uint32_t spec_el_num = spec->attr_data.bs.el_num; + uint32_t attr_el_num = attr->attr_data.bs.el_num; + + /* length of ubcore spec and from uvs should be strictly checked */ + /* as length of uvs ioctl attr should be strictly equal to length of 
ubcore */ + if (spec->field_size != attr->field_size || + spec_el_num != attr_el_num) { + ubcore_log_err( + "Invalid attr, spec/attr, field_size: %u/%u, array_size: %u/%u, type: %u.\n", + spec->field_size, attr->field_size, spec_el_num, + attr_el_num, spec->type); + return -EINVAL; + } + + for (i = 0; i < spec_el_num; i++) { + ptr_src = (spec->data) + i * spec->attr_data.bs.el_size; + ptr_dst = (attr->data) + i * attr->attr_data.bs.el_size; + ret = ubcore_copy_to_user((void __user *)ptr_dst, + (void *)ptr_src, spec->field_size); + if (ret != 0) + return ret; + } + + return ret; +} + +static int ubcore_cmd_tlv_append(struct ubcore_cmd_spec *spec, + uint32_t spec_size, + struct ubcore_cmd_attr *attr, + uint32_t attr_size) +{ + uint32_t spec_idx, attr_idx; + int ret; + + for (spec_idx = 0; spec_idx < spec_size; spec_idx++) { + for (attr_idx = 0; attr_idx < attr_size; attr_idx++) { + if (spec[spec_idx].type == attr[attr_idx].type && + spec[spec_idx].field_size != 0) { + ret = ubcore_cmd_tlv_append_type( + &spec[spec_idx], &attr[attr_idx]); + if (ret != 0) + return ret; + break; + } + } + } + return 0; +} + +int ubcore_tlv_parse(ubcore_fill_spec_func fill_spec, size_t spec_size, + struct ubcore_cmd_hdr *hdr, void *arg) +{ + struct ubcore_cmd_spec *spec = NULL; + struct ubcore_cmd_attr *attr = NULL; + uint32_t attr_size; + int ret; + + /* Command of hdr is valid, no need to check it */ + if (fill_spec == NULL) { + ubcore_log_err("Invalid command: %u.\n", hdr->command); + return -EINVAL; + } + + spec = kcalloc(spec_size, sizeof(struct ubcore_cmd_spec), GFP_KERNEL); + if (spec == NULL) + return -ENOMEM; + + fill_spec(arg, spec); + + attr = ubcore_create_tlv_attr(hdr, &attr_size); + if (attr == NULL) { + ret = -ENOMEM; + goto free_spec; + } + + ret = ubcore_cmd_tlv_parse(spec, spec_size, attr, attr_size); + + kfree(attr); +free_spec: + kfree(spec); + return ret; +} + +int ubcore_tlv_append(ubcore_fill_spec_func fill_spec, size_t spec_size, + struct ubcore_cmd_hdr *hdr, void *arg) +{ + struct ubcore_cmd_spec *spec = NULL; + struct ubcore_cmd_attr *attr = NULL; + uint32_t attr_size; + int ret; + + /* Command of hdr is valid, no need to check it */ + if (fill_spec == NULL) { + ubcore_log_err("Invalid command: %u.\n", hdr->command); + return -EINVAL; + } + + spec = kcalloc(spec_size, sizeof(struct ubcore_cmd_spec), GFP_KERNEL); + if (spec == NULL) + return -ENOMEM; + + fill_spec(arg, spec); + + attr = ubcore_create_tlv_attr(hdr, &attr_size); + if (attr == NULL) { + ret = -ENOMEM; + goto free_spec; + } + + ret = ubcore_cmd_tlv_append(spec, spec_size, attr, attr_size); + + kfree(attr); +free_spec: + kfree(spec); + return ret; +} + +int ubcore_global_tlv_parse(struct ubcore_cmd_hdr *hdr, void *arg) +{ + return ubcore_tlv_parse(g_global_tlv_handler[hdr->command].fill_spec_in, + g_global_tlv_handler[hdr->command].spec_in_len, + hdr, arg); +} + +int ubcore_global_tlv_append(struct ubcore_cmd_hdr *hdr, void *arg) +{ + return ubcore_tlv_append( + g_global_tlv_handler[hdr->command].fill_spec_out, + g_global_tlv_handler[hdr->command].spec_out_len - + UBCORE_CMD_OUT_TYPE_INIT, + hdr, arg); +} diff --git a/drivers/ub/urma/ubcore/ubcore_cmd_tlv.h b/drivers/ub/urma/ubcore/ubcore_cmd_tlv.h new file mode 100644 index 000000000000..ec7ee36361f5 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_cmd_tlv.h @@ -0,0 +1,927 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore cmd tlv parse header, ubcore cmd struct consists of + * type/length/value, ioctl operations are copied and parsed in TLV form + * Author: Chen Yutao + * Create: 2024-08-06 + * Note: + * History: 2024-08-06: create file + */ + +#ifndef UBCORE_CMD_TLV_H +#define UBCORE_CMD_TLV_H + +#include + +#include "ubcore_cmd.h" +#include "ubcore_uvs_cmd.h" + +#define UBCORE_CMD_OUT_TYPE_INIT 0x80 + +struct ubcore_cmd_attr { + uint8_t type; /* See enum ubcore_cmd_xxx_type */ + uint8_t flag; + uint16_t field_size; + union { + struct { + uint32_t el_num : 20; /* Array element number if field is in an array */ + uint32_t el_size : 12; /* Array element size if field is in an array */ + } bs; + uint32_t value; + } attr_data; + uint64_t data; +}; + +struct ubcore_cmd_spec { + uint8_t type; /* See ubcore_cmd_xxx_type_t */ + uint8_t flag; + uint16_t field_size; + union { + struct { + uint32_t el_num : 20; /* Array element number if field is in an array */ + uint32_t el_size : 12; /* Array element size if field is in an array */ + } bs; + uint32_t value; + } attr_data; + uint64_t data; +}; + +/* Attention: for all the enums below, */ +/* new elements should ONLY be added at the bottom */ +/* See struct ubcore_cmd_channel_init */ +enum ubcore_cmd_channel_init_type { + /* In type */ + CHANNEL_INIT_IN_MUE_NAME, + CHANNEL_INIT_IN_USERSPACE_IN, + CHANNEL_INIT_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + CHANNEL_INIT_OUT_KERNEL_OUT = UBCORE_CMD_OUT_TYPE_INIT, + CHANNEL_INIT_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_set_mue_cfg */ +enum ubcore_cmd_set_mue_cfg_type { + /* In type */ + SET_MUE_CFG_IN_MUE_MUE_NAME, + SET_MUE_CFG_IN_MUE_CFG_MASK, + SET_MUE_CFG_IN_MUE_CFG_SUSPEND_PERIOD, + SET_MUE_CFG_IN_MUE_CFG_SUSPEND_CNT, + SET_MUE_CFG_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_create_tpg, in/out type should be continuous */ +enum ubcore_cmd_create_tpg_type { + /* In type */ + CREATE_TPG_IN_MUE_NAME, + CREATE_TPG_IN_LOCAL_EID, + CREATE_TPG_IN_PEER_EID, + CREATE_TPG_IN_TRANS_MODE, + CREATE_TPG_IN_DSCP, + CREATE_TPG_IN_CC_ALG, + CREATE_TPG_IN_CC_PATTERN_IDX, + CREATE_TPG_IN_TP_CNT, + CREATE_TPG_IN_LOCAL_NET_ADDR, + CREATE_TPG_IN_FLAG, + CREATE_TPG_IN_LOCAL_JETTY, + CREATE_TPG_IN_UE_IDX, + CREATE_TPG_IN_PEER_JETTY, + CREATE_TPG_IN_TP_TRANS_MODE, + CREATE_TPG_IN_RETRY_NUM, + CREATE_TPG_IN_RETRY_FACTOR, + CREATE_TPG_IN_ACK_TIMEOUT, + CREATE_TPG_IN_TP_DSCP, + CREATE_TPG_IN_OOR_CNT, + CREATE_TPG_IN_TA_TRANS_TYPE, + CREATE_TPG_IN_TA_TYPE, + CREATE_TPG_IN_JETTY_ID, + CREATE_TPG_IN_TJETTY_ID, + CREATE_TPG_IN_IS_TARGET, + CREATE_TPG_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + CREATE_TPG_OUT_TPGN = UBCORE_CMD_OUT_TYPE_INIT, + CREATE_TPG_OUT_TPN, + CREATE_TPG_OUT_MAX_MTU, + CREATE_TPG_OUT_LOCAL_MTU, + CREATE_TPG_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_create_vtp, in/out type should be continuous */ +enum ubcore_cmd_create_vtp_type { + /* In type */ + CREATE_VTP_IN_MUE_NAME, +
CREATE_VTP_IN_TPGN, + CREATE_VTP_IN_PEER_TPGN, + CREATE_VTP_IN_TP_ATTR_FLAG, + CREATE_VTP_IN_TP_ATTR_PEER_TPN, + CREATE_VTP_IN_TP_ATTR_STATE, + CREATE_VTP_IN_TP_ATTR_TX_PSN, + CREATE_VTP_IN_TP_ATTR_RX_PSN, + CREATE_VTP_IN_TP_ATTR_MTU, + CREATE_VTP_IN_TP_ATTR_CC_APTTERN_IDX, + CREATE_VTP_IN_TP_ATTR_PEER_EXT_ADDR, + CREATE_VTP_IN_TP_ATTR_PEER_EXT_LEN, + CREATE_VTP_IN_TP_ATTR_OOS_CNT, + CREATE_VTP_IN_TP_ATTR_LOCAL_NETADDR_IDX, + CREATE_VTP_IN_TP_ATTR_PEER_NETADDR, + CREATE_VTP_IN_TP_ATTR_DATA_UDP_START, + CREATE_VTP_IN_TP_ATTR_ACK_UDP_START, + CREATE_VTP_IN_TP_ATTR_UDP_RANGE, + CREATE_VTP_IN_TP_ATTR_HOP_LIMIT, + CREATE_VTP_IN_TP_ATTR_FLOW_LABEL, + CREATE_VTP_IN_TP_ATTR_PORT_ID, + CREATE_VTP_IN_TP_ATTR_MN, + CREATE_VTP_IN_TP_ATTR_PEER_TRANS_TYPE, + CREATE_VTP_IN_TP_ATTR_MASK, + CREATE_VTP_IN_CFG_UE_IDX, + CREATE_VTP_IN_CFG_VTPN, + CREATE_VTP_IN_CFG_LOCAL_JETTY, + CREATE_VTP_IN_CFG_LOCAL_EID, + CREATE_VTP_IN_CFG_PEER_EID, + CREATE_VTP_IN_CFG_PEER_JETTY, + CREATE_VTP_IN_CFG_FLAG, + CREATE_VTP_IN_CFG_TRANS_MODE, + CREATE_VTP_IN_CFG_VALUE, + CREATE_VTP_IN_EID_IDX, + CREATE_VTP_IN_UPI, + CREATE_VTP_IN_SHARE_MODE, + CREATE_VTP_IN_NUM, + /* Out type */ + CREATE_VTP_OUT_RTR_TP_CNT = UBCORE_CMD_OUT_TYPE_INIT, + CREATE_VTP_OUT_RTS_TP_CNT, + CREATE_VTP_OUT_VTPN, + CREATE_VTP_OUT_NUM +}; + +/* See struct ubcore_cmd_modify_tpg, in/out type should be continuous */ +enum ubcore_cmd_modify_tpg_type { + /* In type */ + MODIFY_TPG_IN_MUE_NAME, + MODIFY_TPG_IN_PEER_TP_CNT, + MODIFY_TPG_IN_TPGN, + MODIFY_TPG_IN_PEER_TPGN, + MODIFY_TPG_IN_TP_ATTR_FLAG, + MODIFY_TPG_IN_TP_ATTR_PEER_TPN, + MODIFY_TPG_IN_TP_ATTR_STATE, + MODIFY_TPG_IN_TP_ATTR_TX_PSN, + MODIFY_TPG_IN_TP_ATTR_RX_PSN, + MODIFY_TPG_IN_TP_ATTR_MTU, + MODIFY_TPG_IN_TP_ATTR_CC_PATTERN_IDX, + MODIFY_TPG_IN_TP_ATTR_PEER_EXT_ADDR, + MODIFY_TPG_IN_TP_ATTR_PEER_EXT_LEN, + MODIFY_TPG_IN_TP_ATTR_OOS_CNT, + MODIFY_TPG_IN_TP_ATTR_LOCAL_NETADDR_IDX, + MODIFY_TPG_IN_TP_ATTR_PEER_NETADDR, + MODIFY_TPG_IN_TP_ATTR_DATA_UDP_START, + MODIFY_TPG_IN_TP_ATTR_ACK_UDP_START, + MODIFY_TPG_IN_TP_ATTR_UDP_RANGE, + MODIFY_TPG_IN_TP_ATTR_HOP_LIMIT, + MODIFY_TPG_IN_TP_ATTR_FLOW_LABEL, + MODIFY_TPG_IN_TP_ATTR_PORT_ID, + MODIFY_TPG_IN_TP_ATTR_MN, + MODIFY_TPG_IN_TP_ATTR_PEER_TRANS_TYPE, + MODIFY_TPG_IN_RTR_MASK, + MODIFY_TPG_IN_TA_TRANS_TYPE, + MODIFY_TPG_IN_TA_TYPE, + MODIFY_TPG_IN_TA_JETTY_ID, + MODIFY_TPG_IN_TA_TJETTY_ID, + MODIFY_TPG_IN_TA_IS_TARGET, + MODIFY_TPG_IN_UDRV_IN_ADDR, + MODIFY_TPG_IN_UDRV_IN_LEN, + MODIFY_TPG_IN_UDRV_OUT_ADDR, + MODIFY_TPG_IN_UDRV_OUT_LEN, + MODIFY_TPG_IN_NUM, + /* Out type */ + MODIFY_TPG_OUT_RTR_TP_CNT = UBCORE_CMD_OUT_TYPE_INIT, + MODIFY_TPG_OUT_RTS_TP_CNT, + MODIFY_TPG_OUT_NUM +}; + +/* See struct ubcore_cmd_modify_tpg_map_vtp, in/out type should be continuous */ +enum ubcore_cmd_modify_tpg_map_vtp_type { + /* In type */ + MODIFY_TPG_MAP_VTP_IN_MUE_NAME, + MODIFY_TPG_MAP_VTP_IN_PEER_TP_CNT, + MODIFY_TPG_MAP_VTP_IN_TPGN, + MODIFY_TPG_MAP_VTP_IN_PEER_TPGN, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_FLAG, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_PEER_TPN, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_STATE, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_TX_PSN, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_RX_PSN, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_MTU, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_CC_PATTERN_IDX, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_PEER_EXT_ADDR, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_PEER_EXT_LEN, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_OOS_CNT, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_LOCAL_NETADDR_IDX, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_PEER_NETADDR, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_DATA_UDP_START, + 
MODIFY_TPG_MAP_VTP_IN_TP_ATTR_ACK_UDP_START, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_UDP_RANGE, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_HOP_LIMIT, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_FLOW_LABEL, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_PORT_ID, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_MN, + MODIFY_TPG_MAP_VTP_IN_TP_ATTR_PEER_TRANS_TYPE, + MODIFY_TPG_MAP_VTP_IN_RTR_MASK, + MODIFY_TPG_MAP_VTP_IN_CFG_UE_IDX, + MODIFY_TPG_MAP_VTP_IN_CFG_VTPN, + MODIFY_TPG_MAP_VTP_IN_CFG_LOCAL_JETTY, + MODIFY_TPG_MAP_VTP_IN_CFG_LOCAL_EID, + MODIFY_TPG_MAP_VTP_IN_CFG_PEER_EID, + MODIFY_TPG_MAP_VTP_IN_CFG_PEER_JETTY, + MODIFY_TPG_MAP_VTP_IN_CFG_FLAG, + MODIFY_TPG_MAP_VTP_IN_CFG_TRANS_MODE, + MODIFY_TPG_MAP_VTP_IN_CFG_VALUE, + MODIFY_TPG_MAP_VTP_IN_ROLE, + MODIFY_TPG_MAP_VTP_IN_EID_IDX, + MODIFY_TPG_MAP_VTP_IN_UPI, + MODIFY_TPG_MAP_VTP_IN_SHARE_MODE, + MODIFY_TPG_MAP_VTP_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + MODIFY_TPG_MAP_VTP_OUT_RTR_TP_CNT = UBCORE_CMD_OUT_TYPE_INIT, + MODIFY_TPG_MAP_VTP_OUT_RTS_TP_CNT, + MODIFY_TPG_MAP_VTP_OUT_VTPN, + MODIFY_TPG_MAP_VTP_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_modify_tpg_tp_cnt, in/out type should be continuous */ +enum ubcore_cmd_modify_tpg_tp_cnt_type { + /* In type */ + MODIFY_TPG_TP_CNT_IN_MUE_MUE_NAME, + MODIFY_TPG_TP_CNT_IN_TPGN_FOR_MODIFY, + MODIFY_TPG_TP_CNT_IN_TP_CNT, + MODIFY_TPG_TP_CNT_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + MODIFY_TPG_TP_CNT_OUT_TPGN = UBCORE_CMD_OUT_TYPE_INIT, + MODIFY_TPG_TP_CNT_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_create_target_tpg, in/out type should be continuous */ +enum ubcore_cmd_create_target_tpg_type { + /* In type */ + CREATE_TARGET_TPG_IN_MUE_NAME, + CREATE_TARGET_TPG_IN_TPG_CFG_LOCAL_EID, + CREATE_TARGET_TPG_IN_TPG_CFG_PEER_EID, + CREATE_TARGET_TPG_IN_TPG_CFG_TRANS_MODE, + CREATE_TARGET_TPG_IN_TPG_CFG_DSCP, + CREATE_TARGET_TPG_IN_TPG_CFG_CC_ALG, + CREATE_TARGET_TPG_IN_TPG_CFG_CC_PATTERN_IDX, + CREATE_TARGET_TPG_IN_TPG_CFG_TP_CNT, + CREATE_TARGET_TPG_IN_TPG_CFG_LOCAL_NET_ADDR, + CREATE_TARGET_TPG_IN_TP_CFG_FLAG, + CREATE_TARGET_TPG_IN_TP_CFG_LOCAL_JETTY, + CREATE_TARGET_TPG_IN_TP_CFG_UE_IDX, + CREATE_TARGET_TPG_IN_TP_CFG_PEER_JETTY, + CREATE_TARGET_TPG_IN_TP_CFG_TRANS_MODE, + CREATE_TARGET_TPG_IN_TP_CFG_RETRY_NUM, + CREATE_TARGET_TPG_IN_TP_CFG_RETRY_FACTOR, + CREATE_TARGET_TPG_IN_TP_CFG_ACK_TIMEOUT, + CREATE_TARGET_TPG_IN_TP_CFG_DSCP, + CREATE_TARGET_TPG_IN_TP_CFG_OOR_CNT, + CREATE_TARGET_TPG_IN_PEER_TPGN, + CREATE_TARGET_TPG_IN_RTR_ATTR_FLAG, + CREATE_TARGET_TPG_IN_RTR_ATTR_PEER_TPN, + CREATE_TARGET_TPG_IN_RTR_ATTR_STATE, + CREATE_TARGET_TPG_IN_RTR_ATTR_TX_PSN, + CREATE_TARGET_TPG_IN_RTR_ATTR_RX_PSN, + CREATE_TARGET_TPG_IN_RTR_ATTR_MTU, + CREATE_TARGET_TPG_IN_RTR_ATTR_CC_PATTERN_IDX, + CREATE_TARGET_TPG_IN_RTR_ATTR_PEER_EXT_ADDR, + CREATE_TARGET_TPG_IN_RTR_ATTR_PEER_EXT_LEN, + CREATE_TARGET_TPG_IN_RTR_ATTR_OOS_CNT, + CREATE_TARGET_TPG_IN_RTR_ATTR_LOCAL_NET_ADDR_IDX, + CREATE_TARGET_TPG_IN_RTR_ATTR_PEER_NET_ADDR, + CREATE_TARGET_TPG_IN_RTR_ATTR_DATA_UDP_START, + CREATE_TARGET_TPG_IN_RTR_ATTR_ACK_UDP_START, + CREATE_TARGET_TPG_IN_RTR_ATTR_UDP_RANGE, + CREATE_TARGET_TPG_IN_RTR_ATTR_HOP_LIMIT, + CREATE_TARGET_TPG_IN_RTR_ATTR_FLOW_LABEL, + CREATE_TARGET_TPG_IN_RTR_ATTR_PORT_ID, + CREATE_TARGET_TPG_IN_RTR_ATTR_MN, + CREATE_TARGET_TPG_IN_RTR_ATTR_PEER_TRANS_TYPE, + CREATE_TARGET_TPG_IN_RTR_MASK, + CREATE_TARGET_TPG_IN_TA_DATA_TRANS_TYPE, + CREATE_TARGET_TPG_IN_TA_DATA_TA_TYPE, + CREATE_TARGET_TPG_IN_TA_DATA_JETTY_ID, + 
CREATE_TARGET_TPG_IN_TA_DATA_TJETTY_ID, + CREATE_TARGET_TPG_IN_TA_DATA_IS_TARGET, + CREATE_TARGET_TPG_IN_PEER_MTU, + CREATE_TARGET_TPG_IN_UDATA_IN_ADDR, + CREATE_TARGET_TPG_IN_UDATA_IN_LEN, + CREATE_TARGET_TPG_IN_UDATA_OUT_ADDR, + CREATE_TARGET_TPG_IN_UDATA_OUT_LEN, + CREATE_TARGET_TPG_IN_UDRV_EXT_IN_ADDR, + CREATE_TARGET_TPG_IN_UDRV_EXT_IN_LEN, + CREATE_TARGET_TPG_IN_UDRV_EXT_OUT_ADDR, + CREATE_TARGET_TPG_IN_UDRV_EXT_OUT_LEN, + CREATE_TARGET_TPG_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + CREATE_TARGET_TPG_OUT_TPGN = UBCORE_CMD_OUT_TYPE_INIT, + CREATE_TARGET_TPG_OUT_TPN, + CREATE_TARGET_TPG_OUT_RTS_TP_CNT, + CREATE_TARGET_TPG_OUT_LOCAL_MTU, + CREATE_TARGET_TPG_OUT_UDRV_EXT_IN_ADDR, + CREATE_TARGET_TPG_OUT_UDRV_EXT_IN_LEN, + CREATE_TARGET_TPG_OUT_UDRV_EXT_OUT_ADDR, + CREATE_TARGET_TPG_OUT_UDRV_EXT_OUT_LEN, + CREATE_TARGET_TPG_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_modify_target_tpg, in/out type should be continuous */ +enum ubcore_cmd_modify_target_tpg_type { + /* In type */ + MODIFY_TARGET_TPG_IN_MUE_NAME, + MODIFY_TARGET_TPG_IN_PEER_TP_CNT, + MODIFY_TARGET_TPG_IN_TPGN, + MODIFY_TARGET_TPG_IN_PEER_TPGN, + MODIFY_TARGET_TPG_IN_RTR_ATTR_FLAG, + MODIFY_TARGET_TPG_IN_RTR_ATTR_PEER_TPN, + MODIFY_TARGET_TPG_IN_RTR_ATTR_STATE, + MODIFY_TARGET_TPG_IN_RTR_ATTR_TX_PSN, + MODIFY_TARGET_TPG_IN_RTR_ATTR_RX_PSN, + MODIFY_TARGET_TPG_IN_RTR_ATTR_MTU, + MODIFY_TARGET_TPG_IN_RTR_ATTR_CC_PATTERN_IDX, + MODIFY_TARGET_TPG_IN_RTR_ATTR_PEER_EXT_ADDR, + MODIFY_TARGET_TPG_IN_RTR_ATTR_PEER_EXT_LEN, + MODIFY_TARGET_TPG_IN_RTR_ATTR_OOS_CNT, + MODIFY_TARGET_TPG_IN_RTR_ATTR_LOCAL_NET_ADDR_IDX, + MODIFY_TARGET_TPG_IN_RTR_ATTR_PEER_NET_ADDR, + MODIFY_TARGET_TPG_IN_RTR_ATTR_DATA_UDP_START, + MODIFY_TARGET_TPG_IN_RTR_ATTR_ACK_UDP_START, + MODIFY_TARGET_TPG_IN_RTR_ATTR_UDP_RANGE, + MODIFY_TARGET_TPG_IN_RTR_ATTR_HOP_LIMIT, + MODIFY_TARGET_TPG_IN_RTR_ATTR_FLOW_LABEL, + MODIFY_TARGET_TPG_IN_RTR_ATTR_PORT_ID, + MODIFY_TARGET_TPG_IN_RTR_ATTR_MN, + MODIFY_TARGET_TPG_IN_RTR_ATTR_PEER_TRANS_TYPE, + MODIFY_TARGET_TPG_IN_RTR_MASK, + MODIFY_TARGET_TPG_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + MODIFY_TARGET_TPG_OUT_RTR_TP_CNT = UBCORE_CMD_OUT_TYPE_INIT, + MODIFY_TARGET_TPG_OUT_RTS_TP_CNT, + MODIFY_TARGET_TPG_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_destroy_vtp */ +enum ubcore_cmd_destroy_vtp_type { + /* In type */ + DESTROY_VTP_IN_MUE_MUE_NAME, + DESTROY_VTP_IN_MODE, + DESTROY_VTP_IN_LOCAL_JETTY, + DESTROY_VTP_IN_ROLE, + DESTROY_VTP_IN_LOCAL_EID, + DESTROY_VTP_IN_PEER_EID, + DESTROY_VTP_IN_PEER_JETTY, + DESTROY_VTP_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_destroy_tpg, in/out type should be continuous */ +enum ubcore_cmd_destroy_tpg_type { + /* In type */ + DESTROY_TPG_IN_MUE_MUE_NAME, + DESTROY_TPG_IN_TPGN, + DESTROY_TPG_IN_TA_DATA_TRANS_TYPE, + DESTROY_TPG_IN_TA_DATA_TA_TYPE, + DESTROY_TPG_IN_TA_DATA_JETTY_ID, + DESTROY_TPG_IN_TA_DATA_TJETTY_ID, + DESTROY_TPG_IN_TA_DATA_IS_TARGET, + DESTROY_TPG_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + DESTROY_TPG_OUT_DESTROYED_TP_CNT = UBCORE_CMD_OUT_TYPE_INIT, + DESTROY_TPG_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_opt_sip, in/out type should be continuous */ +enum ubcore_cmd_opt_sip_type { + /* In type */ + OPT_SIP_IN_INFO_DEV_NAME, + OPT_SIP_IN_INFO_ADDR, + OPT_SIP_IN_INFO_PREFIX_LEN, + OPT_SIP_IN_INFO_PORT_CNT, + OPT_SIP_IN_INFO_PORT_ID, + 
OPT_SIP_IN_INFO_MTU, + OPT_SIP_IN_INFO_NETDEV_NAME, + OPT_SIP_IN_INFO_IS_ACTIVE, + OPT_SIP_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + OPT_SIP_OUT_SIP_IDX = UBCORE_CMD_OUT_TYPE_INIT, + OPT_SIP_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_map_vtp, in/out type should be continuous */ +enum ubcore_cmd_map_vtp_type { + /* In type */ + MAP_VTP_IN_MUE_NAME, + MAP_VTP_IN_VTP_UE_IDX, + MAP_VTP_IN_VTP_VTPN, + MAP_VTP_IN_VTP_LOCAL_JETTY, + MAP_VTP_IN_VTP_LOCAL_EID, + MAP_VTP_IN_VTP_PEER_EID, + MAP_VTP_IN_VTP_PEER_JETTY, + MAP_VTP_IN_VTP_FLAG, + MAP_VTP_IN_VTP_TRANS_MODE, + MAP_VTP_IN_VTP_VALUE, + MAP_VTP_IN_ROLE, + MAP_VTP_IN_EID_IDX, + MAP_VTP_IN_UPI, + MAP_VTP_IN_SHARE_MODE, + MAP_VTP_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + MAP_VTP_OUT_VTPN = UBCORE_CMD_OUT_TYPE_INIT, + MAP_VTP_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_create_utp, in/out type should be continuous */ +enum ubcore_cmd_create_utp_type { + /* In type */ + CREATE_UTP_IN_MUE_NAME, + CREATE_UTP_IN_UTP_CFG_FLAG, + CREATE_UTP_IN_UTP_CFG_UDP_START, + CREATE_UTP_IN_UTP_CFG_UDP_RANGE, + CREATE_UTP_IN_UTP_CFG_LOCAL_NET_ADDR_IDX, + CREATE_UTP_IN_UTP_CFG_LOCAL_NET_ADDR, + CREATE_UTP_IN_UTP_CFG_PEER_NET_ADDR, + CREATE_UTP_IN_UTP_CFG_FLOW_LABEL, + CREATE_UTP_IN_UTP_CFG_DSCP, + CREATE_UTP_IN_UTP_CFG_HOP_LIMIT, + CREATE_UTP_IN_UTP_CFG_PORT_ID, + CREATE_UTP_IN_UTP_CFG_MTU, + CREATE_UTP_IN_VTP_UE_IDX, + CREATE_UTP_IN_VTP_VTPN, + CREATE_UTP_IN_VTP_LOCAL_JETTY, + CREATE_UTP_IN_VTP_LOCAL_EID, + CREATE_UTP_IN_VTP_PEER_EID, + CREATE_UTP_IN_VTP_PEER_JETTY, + CREATE_UTP_IN_VTP_FLAG, + CREATE_UTP_IN_VTP_TRANS_MODE, + CREATE_UTP_IN_VTP_VALUE, + CREATE_UTP_IN_EID_IDX, + CREATE_UTP_IN_UPI, + CREATE_UTP_IN_SHARE_MODE, + CREATE_UTP_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + CREATE_UTP_OUT_IDX = UBCORE_CMD_OUT_TYPE_INIT, + CREATE_UTP_OUT_VTPN, + CREATE_UTP_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_destroy_utp */ +enum ubcore_cmd_destroy_utp_type { + /* In type */ + DESTROY_UTP_IN_MUE_NAME, + DESTROY_UTP_IN_UTP_IDX, + DESTROY_UTP_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_get_dev_feature, in/out type should be continuous */ +enum ubcore_cmd_get_dev_feature_type { + /* In type */ + GET_DEV_FEATURE_IN_DEV_NAME, + GET_DEV_FEATURE_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + GET_DEV_FEATURE_OUT_FEATURE = UBCORE_CMD_OUT_TYPE_INIT, + GET_DEV_FEATURE_OUT_MAX_UEID_CNT, + GET_DEV_FEATURE_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_restore_tp_error */ +enum ubcore_cmd_restore_tp_error_type { + /* In type */ + RESTORE_TP_ERROR_IN_MUE_MUE_NAME, + RESTORE_TP_ERROR_IN_TPN, + RESTORE_TP_ERROR_IN_DATA_UDP_START, + RESTORE_TP_ERROR_IN_ACK_UDP_START, + RESTORE_TP_ERROR_IN_RX_PSN, + RESTORE_TP_ERROR_IN_TX_PSN, + RESTORE_TP_ERROR_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_restore_tp_suspend */ +enum ubcore_cmd_restore_tp_suspend_type { + /* In type */ + RESTORE_TP_SUSPEND_IN_MUE_MUE_NAME, + RESTORE_TP_SUSPEND_IN_TPGN, + RESTORE_TP_SUSPEND_IN_TPN, + RESTORE_TP_SUSPEND_IN_DATA_UDP_START, + RESTORE_TP_SUSPEND_IN_ACK_UDP_START, + RESTORE_TP_SUSPEND_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_change_tp_to_error */ +enum ubcore_cmd_change_tp_to_error_type { + /* In type */ + CHANGE_TP_TO_ERROR_IN_MUE_MUE_NAME, + 
CHANGE_TP_TO_ERROR_IN_TPN, + CHANGE_TP_TO_ERROR_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_set_upi */ +enum ubcore_cmd_set_upi_type { + /* In type */ + SET_UPI_IN_DEV_NAME, + SET_UPI_IN_UPI, + SET_UPI_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_show_upi, in/out type should be continuous */ +enum ubcore_cmd_show_upi_type { + /* In type */ + SHOW_UPI_IN_DEV_NAME, + SHOW_UPI_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + SHOW_UPI_OUT_UPI = UBCORE_CMD_OUT_TYPE_INIT, + SHOW_UPI_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_config_function_migrate_state, in/out type should be continuous */ +enum ubcore_cmd_cfg_fm_state_type { + /* In type */ + CFG_FM_STATE_IN_UE_IDX, + CFG_FM_STATE_IN_MUE_NAME, + CFG_FM_STATE_IN_UEID_CFG, /* struct ubcore_ueid_cfg is regarded as a whole */ + CFG_FM_STATE_IN_CFG_CNT, + CFG_FM_STATE_IN_STATE, + CFG_FM_STATE_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + CFG_FM_STATE_OUT_CNT = UBCORE_CMD_OUT_TYPE_INIT, + CFG_FM_STATE_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_set_vport_cfg */ +enum ubcore_cmd_set_vport_cfg_type { + /* In type */ + SET_VPORT_CFG_IN_MASK, + SET_VPORT_CFG_IN_DEV_NAME, + SET_VPORT_CFG_IN_UE_IDX, + SET_VPORT_CFG_IN_PATTERN, + SET_VPORT_CFG_IN_VIRT, + SET_VPORT_CFG_IN_MIN_JETTY_CNT, + SET_VPORT_CFG_IN_MAX_JETTY_CNT, + SET_VPORT_CFG_IN_MIN_JFR_CNT, + SET_VPORT_CFG_IN_MAX_JFR_CNT, + SET_VPORT_CFG_IN_TP_CNT, + SET_VPORT_CFG_IN_SLICE, + SET_VPORT_CFG_IN_UVS_NAME, + SET_VPORT_CFG_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_modify_vtp */ +enum ubcore_cmd_modify_vtp_type { + /* In type */ + MODIFY_VTP_IN_MUE_NAME, + MODIFY_VTP_IN_VTP_UE_IDX, + MODIFY_VTP_IN_VTP_VTPN, + MODIFY_VTP_IN_VTP_LOCAL_JETTY, + MODIFY_VTP_IN_VTP_LOCAL_EID, + MODIFY_VTP_IN_VTP_PEER_EID, + MODIFY_VTP_IN_VTP_PEER_JETTY, + MODIFY_VTP_IN_VTP_FLAG, + MODIFY_VTP_IN_VTP_TRANS_MODE, + MODIFY_VTP_IN_VTP_VALUE, + MODIFY_VTP_IN_CFG_CNT, + MODIFY_VTP_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_get_dev_info, in/out type should be continuous */ +enum ubcore_cmd_get_dev_info_type { + /* In type */ + GET_DEV_INFO_IN_TARGET_MUE_NAME, + GET_DEV_INFO_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + GET_DEV_INFO_OUT_MAX_MTU = UBCORE_CMD_OUT_TYPE_INIT, + GET_DEV_INFO_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_create_ctp, in/out type should be continuous */ +enum ubcore_cmd_create_ctp_type { + /* In type */ + CREATE_CTP_IN_MUE_NAME, + CREATE_CTP_IN_CTP_CFG_PEER_NET_ADDR, + CREATE_CTP_IN_CTP_CFG_CNA_LEN, + CREATE_CTP_IN_VTP_UE_IDX, + CREATE_CTP_IN_VTP_VTPN, + CREATE_CTP_IN_VTP_LOCAL_JETTY, + CREATE_CTP_IN_VTP_LOCAL_EID, + CREATE_CTP_IN_VTP_PEER_EID, + CREATE_CTP_IN_VTP_PEER_JETTY, + CREATE_CTP_IN_VTP_FLAG, + CREATE_CTP_IN_VTP_TRANS_MODE, + CREATE_CTP_IN_VTP_VALUE, + CREATE_CTP_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + CREATE_CTP_OUT_IDX = UBCORE_CMD_OUT_TYPE_INIT, + CREATE_CTP_OUT_VTPN, + CREATE_CTP_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_destroy_ctp, in/out type should be continuous */ +enum ubcore_cmd_destroy_ctp_type { + /* In type */ + DESTROY_CTP_IN_MUE_NAME, + DESTROY_CTP_IN_CTP_IDX, + DESTROY_CTP_IN_NUM, /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_change_tpg_to_error, 
in/out type should be continuous */ +enum ubcore_cmd_change_tpg_to_error_type { + /* In type */ + CHANGE_TPG_TO_ERROR_IN_TPGN, + CHANGE_TPG_TO_ERROR_IN_MUE_MUE_NAME, + CHANGE_TPG_TO_ERROR_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + CHANGE_TPG_TO_ERROR_OUT_TP_ERROR_CNT = UBCORE_CMD_OUT_TYPE_INIT, + CHANGE_TPG_TO_ERROR_OUT_CHANGE_TP_TO_ERR_FAIL_CNT, + CHANGE_TPG_TO_ERROR_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_opt_eid */ +enum ubcore_cmd_opt_eid_type { + /* In type */ + OPT_EID_IN_DEV_NAME, + OPT_EID_IN_UPI, + OPT_EID_IN_UE_IDX, + OPT_EID_IN_EID, + OPT_EID_IN_EID_INDEX, + OPT_EID_IN_UPDATE_EID_TBL, + OPT_EID_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_opt_query_ue_idx, in/out type should be continuous */ +enum ubcore_cmd_opt_query_ue_idx_type { + /* In type */ + OPT_QUERY_UE_IDX_IN_DEV_NAME, + OPT_QUERY_UE_IDX_IN_DEVID_RAW, + OPT_QUERY_UE_IDX_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + OPT_QUERY_UE_IDX_OUT_UE_IDX = UBCORE_CMD_OUT_TYPE_INIT, + OPT_QUERY_UE_IDX_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_opt_config_dscp_vl */ +enum ubcore_cmd_opt_config_dscp_vl_type { + /* In type */ + OPT_CONFIG_DSCP_VL_IN_DEV_NAME, + OPT_CONFIG_DSCP_VL_IN_DSCP, + OPT_CONFIG_DSCP_VL_IN_VL, + OPT_CONFIG_DSCP_VL_IN_NUM_VALUE, + OPT_CONFIG_DSCP_VL_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_map_target_vtp */ +enum ubcore_cmd_map_target_vtp_type { + /* In type */ + MAP_TARGET_VTP_IN_MUE_NAME, + MAP_TARGET_VTP_IN_VTP_UE_IDX, + MAP_TARGET_VTP_IN_VTP_VTPN, + MAP_TARGET_VTP_IN_VTP_LOCAL_JETTY, + MAP_TARGET_VTP_IN_VTP_LOCAL_EID, + MAP_TARGET_VTP_IN_VTP_PEER_EID, + MAP_TARGET_VTP_IN_VTP_PEER_JETTY, + MAP_TARGET_VTP_IN_VTP_FLAG, + MAP_TARGET_VTP_IN_VTP_TRANS_MODE, + MAP_TARGET_VTP_IN_VTP_VALUE, + MAP_TARGET_VTP_IN_ROLE, + MAP_TARGET_VTP_IN_EID_IDX, + MAP_TARGET_VTP_IN_UPI, + MAP_TARGET_VTP_IN_SHARE_MODE, + MAP_TARGET_VTP_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_list_migrate_entry, in/out type should be continuous */ +enum ubcore_cmd_list_migrate_entry_type { + /* In type */ + LIST_MIG_ENTRY_IN_MUE_NAME, + LIST_MIG_ENTRY_IN_CNT, + LIST_MIG_ENTRY_IN_UE_IDX_LIST, + LIST_MIG_ENTRY_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + LIST_MIG_ENTRY_OUT_STATS_LIST = UBCORE_CMD_OUT_TYPE_INIT, + LIST_MIG_ENTRY_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_opt_query_dscp_vl, in/out type should be continuous */ +enum ubcore_cmd_opt_query_dscp_vl_type { + /* In type */ + OPT_QUERY_DSCP_VL_IN_DEV_NAME, + OPT_QUERY_DSCP_VL_IN_DSCP, + OPT_QUERY_DSCP_VL_IN_NUM_VALUE, + OPT_QUERY_DSCP_VL_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + OPT_QUERY_DSCP_VL_OUT_VL = UBCORE_CMD_OUT_TYPE_INIT, + OPT_QUERY_DSCP_VL_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_opt_dfx_query_stats, in/out type should be continuous */ +enum ubcore_cmd_opt_dfx_query_stats_type { + /* In type */ + OPT_DFX_QUERY_STATS_IN_DEV_NAME, + OPT_DFX_QUERY_STATS_IN_TYPE, + OPT_DFX_QUERY_STATS_IN_ID, + OPT_DFX_QUERY_STATS_IN_EXT, + OPT_DFX_QUERY_STATS_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + OPT_DFX_QUERY_STATS_OUT_TX_PKT = UBCORE_CMD_OUT_TYPE_INIT, + OPT_DFX_QUERY_STATS_OUT_RX_PKT, + OPT_DFX_QUERY_STATS_OUT_TX_BYTES, + OPT_DFX_QUERY_STATS_OUT_RX_BYTES, + OPT_DFX_QUERY_STATS_OUT_TX_PKT_ERR, 
+ OPT_DFX_QUERY_STATS_OUT_RX_PKT_ERR, + OPT_DFX_QUERY_STATS_OUT_TX_TIMEOUT_CNT, + OPT_DFX_QUERY_STATS_OUT_RX_CE_PKT, + OPT_DFX_QUERY_STATS_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_opt_dfx_query_res, in/out type should be continuous */ +enum ubcore_cmd_opt_dfx_query_res_type { + /* In type */ + DFX_QUERY_RES_IN_DEV_NAME, + DFX_QUERY_RES_IN_TYPE, + DFX_QUERY_RES_IN_KEY, + DFX_QUERY_RES_IN_KEY_EXT, + DFX_QUERY_RES_IN_KEY_CNT, + DFX_QUERY_RES_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + /* vtp, see struct ubcore_res_vtp_val */ + DFX_QUERY_RES_OUT_VTP_UE_IDX = UBCORE_CMD_OUT_TYPE_INIT, + DFX_QUERY_RES_OUT_VTP_VTPN, + DFX_QUERY_RES_OUT_VTP_LOCAL_EID, + DFX_QUERY_RES_OUT_VTP_LOCAL_JETTY, + DFX_QUERY_RES_OUT_VTP_PEER_EID, + DFX_QUERY_RES_OUT_VTP_PEER_JETTY, + DFX_QUERY_RES_OUT_VTP_FLAG, + DFX_QUERY_RES_OUT_VTP_TRANS_MODE, + DFX_QUERY_RES_OUT_VTP_TPGN, + /* tp, see struct ubcore_res_tp_val */ + DFX_QUERY_RES_OUT_TP_TPN, + DFX_QUERY_RES_OUT_TP_TX_PSN, + DFX_QUERY_RES_OUT_TP_RX_PSN, + DFX_QUERY_RES_OUT_TP_DSCP, + DFX_QUERY_RES_OUT_TP_OOR_EN, + DFX_QUERY_RES_OUT_TP_SEL_RET_EN, + DFX_QUERY_RES_OUT_TP_STATE, + DFX_QUERY_RES_OUT_TP_DATA_UDP_START, + DFX_QUERY_RES_OUT_TP_ACK_UDP_START, + DFX_QUERY_RES_OUT_TP_UDP_RANGE, + DFX_QUERY_RES_OUT_TP_SPRAY_EN, + /* tpg, see struct ubcore_res_dfx_tpg_info */ + DFX_QUERY_RES_OUT_TPG_TP_CNT, + DFX_QUERY_RES_OUT_TPG_DSCP, + DFX_QUERY_RES_OUT_TPG_TP_STATE, + DFX_QUERY_RES_OUT_TPG_TPN, + /* utp, see struct ubcore_res_utp_val */ + DFX_QUERY_RES_OUT_UTP_UTPN, + DFX_QUERY_RES_OUT_UTP_DATA_UDP_START, + DFX_QUERY_RES_OUT_UTP_UDP_RANGE, + DFX_QUERY_RES_OUT_UTP_FLAG, + /* mue, see struct ubcore_res_dev_tp_val */ + DFX_QUERY_RES_OUT_MUE_VTP_CNT, + DFX_QUERY_RES_OUT_MUE_TP_CNT, + DFX_QUERY_RES_OUT_MUE_TPG_CNT, + DFX_QUERY_RES_OUT_MUE_UTP_CNT, + DFX_QUERY_RES_OUT_VPORT_EID_USE_CNT, + DFX_QUERY_RES_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_discover_dmac, in/out type should be continuous */ +enum ubcore_cmd_discover_dmac_type { + /* In type */ + DISCOVER_DMAC_IN_SIP, + DISCOVER_DMAC_IN_DIP, + DISCOVER_DMAC_IN_MUE_NAME, + DISCOVER_DMAC_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + DISCOVER_DMAC_OUT_DMAC = UBCORE_CMD_OUT_TYPE_INIT, + DISCOVER_DMAC_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_clear_vice_tpg */ +enum ubcore_cmd_clear_vice_tpg_type { + /* In type */ + CLEAR_VICE_TPG_IN_MUE_NAME, + CLEAR_VICE_TPG_IN_LOCATION, + CLEAR_VICE_TPG_IN_LOCAL_EID, + CLEAR_VICE_TPG_IN_PEER_EID, + CLEAR_VICE_TPG_IN_TPGN, + CLEAR_VICE_TPG_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_user_ctl_arg. 
Note: all of in/out parameters should be copied from user */ +/* So they are IN type */ +enum ubcore_cmd_user_ctl_type { + /* In type */ + USER_CTL_IN_MUE_NAME, + USER_CTL_IN_ADDR, + USER_CTL_IN_LEN, + USER_CTL_IN_OPCODE, + USER_CTL_OUT_ADDR, + USER_CTL_OUT_LEN, + /* No need to handle rsv */ + USER_CTL_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_register_uvs_info */ +enum ubcore_cmd_register_uvs_info_type { + /* In type */ + REGISTER_UVS_INFO_IN_UVS_NAME, + REGISTER_UVS_INFO_IN_UVS_POLICY, + REGISTER_UVS_INFO_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_unregister_uvs_info */ +enum ubcore_cmd_unregister_uvs_info_type { + /* In type */ + UNREGISTER_UVS_INFO_IN_UVS_NAME, + UNREGISTER_UVS_INFO_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_get_vtp_table_cnt */ +enum ubcore_cmd_get_vtp_table_cnt_type { + /* Out type */ + GET_VTP_TABLE_CNT_OUT_VTP_CNT = UBCORE_CMD_OUT_TYPE_INIT, + GET_VTP_TABLE_CNT_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_restored_vtp_entry, in/out type should be continuous */ +enum ubcore_cmd_restored_vtp_entry_type { + /* In type */ + RESTORE_VTP_IN_VTP_CNT, + RESTORE_VTP_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + RESTORE_VTP_OUT_VTP_CNT = UBCORE_CMD_OUT_TYPE_INIT, + RESTORE_VTP_OUT_ENTRY, + RESTORE_VTP_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_get_tpg_table_cnt */ +enum ubcore_cmd_get_tpg_table_cnt_type { + /* Out type */ + GET_TPG_TABLE_CNT_OUT_TPG_CNT = UBCORE_CMD_OUT_TYPE_INIT, + GET_TPG_TABLE_CNT_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_restored_tpg_entry, in/out type should be continuous */ +enum ubcore_cmd_restored_tpg_entry_type { + /* In type */ + RESTORE_TPG_IN_TPG_CNT, + RESTORE_TPG_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + RESTORE_TPG_OUT_TPG_CNT = UBCORE_CMD_OUT_TYPE_INIT, + RESTORE_TPG_OUT_ENTRY, + RESTORE_TPG_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_get_ue_table_cnt */ +enum ubcore_cmd_get_ue_table_cnt_type { + /* Out type */ + GET_UE_TABLE_CNT_OUT_UE_CNT = UBCORE_CMD_OUT_TYPE_INIT, + GET_UE_TABLE_CNT_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct ubcore_cmd_restored_ue_entry, in/out type should be continuous */ +enum ubcore_cmd_restored_ue_entry_type { + /* In type */ + RESTORE_UE_IN_UE_CNT, + RESTORE_UE_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + RESTORE_UE_OUT_ENTRY = UBCORE_CMD_OUT_TYPE_INIT, + RESTORE_UE_OUT_NUM /* Only for calculating number of types */ +}; + +enum ubcore_cmd_list_mue_type { + /* In type */ + LIST_MUE_IN_NUM, /* Only for calculating number of types */ + + /* Out type */ + LIST_MUE_OUT_MUE_CNT = UBCORE_CMD_OUT_TYPE_INIT, + LIST_MUE_OUT_MUE_NAME, + LIST_MUE_OUT_NETDEV_NAME, + LIST_MUE_OUT_NUM /* Only for calculating number of types */ +}; + +enum ubcore_cmd_set_topo_type { + /* In type */ + SET_TOPO_IN_TOPO_INFO, + SET_TOPO_IN_TOPO_NUM, + SET_TOPO_IN_NUM /* Only for calculating number of types */ +}; + +int ubcore_mue_tlv_parse(struct ubcore_cmd_hdr *hdr, void *arg); +int ubcore_mue_tlv_append(struct ubcore_cmd_hdr *hdr, void *arg); +int ubcore_global_tlv_parse(struct ubcore_cmd_hdr *hdr, void *arg); +int ubcore_global_tlv_append(struct ubcore_cmd_hdr *hdr, void *arg); + +int ubcore_mue_tlv_parse_post(struct ubcore_cmd_hdr *hdr, void *arg); + +#endif diff --git 
a/drivers/ub/urma/ubcore/ubcore_connect_adapter.c b/drivers/ub/urma/ubcore/ubcore_connect_adapter.c new file mode 100644 index 000000000000..b166450778bc --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_connect_adapter.c @@ -0,0 +1,707 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore connect adapter implementation file + * Author: Wang Hang + * Create: 2025-06-19 + * Note: + * History: 2025-06-19: create file + */ + +#include "ubcore_connect_adapter.h" + +#include + +#include "net/ubcore_net.h" +#include "net/ubcore_session.h" +#include "ubcore_log.h" +#include +#include "ubcore_priv.h" + +enum msg_create_conn_result { + CREATE_CONN_SUCCESS = 0, + GET_TP_LIST_ERROR, + ACTIVE_TP_ERROR, + CREATE_CONN_FAIL +}; + +struct session_data_create_conn { + uint64_t peer_tp_handle; + uint32_t rx_psn; + int ret; +}; + +struct msg_create_conn_req { + struct ubcore_get_tp_cfg get_tp_cfg; + uint64_t tp_handle; + uint32_t tx_psn; +}; + +struct msg_create_conn_resp { + uint64_t tp_handle; + uint32_t tx_psn; + int result; /* Refer to enum msg_create_conn_result */ +}; + +struct msg_destroy_conn_req { + union ubcore_tp_handle tp_handle; +}; + +static int ubcore_active_tp(struct ubcore_device *dev, + struct ubcore_active_tp_cfg *active_cfg) +{ + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->active_tp == NULL || + active_cfg == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + ubcore_log_info("Active tp, local tp_hdl: %llu, peer tp_hdl: %llu.\n", + active_cfg->tp_handle.value, + active_cfg->peer_tp_handle.value); + ret = dev->ops->active_tp(dev, active_cfg); + if (ret != 0) + ubcore_log_err( + "Failed to active tp, ret: %d, local tpid: %u.\n", ret, + (uint32_t)active_cfg->tp_handle.bs.tpid); + + return ret; +} + +static int ubcore_deactive_tp(struct ubcore_device *dev, + union ubcore_tp_handle tp_handle, + struct ubcore_udata *udata) +{ + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->active_tp == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + ret = dev->ops->deactive_tp(dev, tp_handle, udata); + if (ret != 0) + ubcore_log_err("Failed to deactivate tp, ret: %d.\n", ret); + + return ret; +} + +static struct ubcore_session * +create_session_for_create_connection(struct ubcore_device *dev) +{ + struct ubcore_session *session; + struct session_data_create_conn *session_data; + + session_data = + kzalloc(sizeof(struct session_data_create_conn), GFP_KERNEL); + if (IS_ERR_OR_NULL(session_data)) { + ubcore_log_err("Failed to alloc create user arg"); + return NULL; + } + + session_data->ret = -1; + + session = ubcore_session_create(dev, session_data, 0, NULL, NULL); + if (session == NULL) { + ubcore_log_err("Failed to alloc session for create connection"); + kfree(session_data); + return NULL; + } + + return session; +} + +static int send_create_req(struct ubcore_device *dev, uint32_t session_id, + struct msg_create_conn_req *req) +{ + struct ubcore_net_msg msg = { 0 }; + int ret; + 
+ msg.type = UBCORE_NET_CREATE_REQ; + msg.len = (uint16_t)sizeof(struct msg_create_conn_req); + msg.session_id = session_id; + msg.data = req; + + ret = ubcore_net_send_to(dev, &msg, req->get_tp_cfg.peer_eid); + if (ret != 0) { + ubcore_log_err("Failed to send msg"); + return ret; + } + return 0; +} + +static int send_create_resp(struct ubcore_device *dev, void *conn, + uint32_t session_id, + struct msg_create_conn_resp *resp) +{ + struct ubcore_net_msg msg = { 0 }; + int ret; + + msg.type = UBCORE_NET_CREATE_RESP; + msg.len = (uint16_t)sizeof(struct msg_create_conn_resp); + msg.session_id = session_id; + msg.data = resp; + + ret = ubcore_net_send(dev, &msg, conn); + if (ret != 0) { + ubcore_log_err("Failed to send msg"); + return ret; + } + return 0; +} + +static int ubcore_add_ex_tp_info(struct ubcore_device *dev, uint64_t tp_handle) +{ + struct ubcore_ex_tp_info *ex_tp_info = NULL; + uint32_t hash; + int ret; + + ex_tp_info = kzalloc(sizeof(struct ubcore_ex_tp_info), GFP_KERNEL); + if (ex_tp_info == NULL) + return -ENOMEM; + ex_tp_info->tp_handle = tp_handle; + kref_init(&ex_tp_info->ref_cnt); + + hash = ubcore_get_ex_tp_hash(&tp_handle); + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_EX_TP], + &ex_tp_info->hnode, hash); + if (ret != 0) { + ubcore_log_err("Failed to add ex tp info, tp_handle: %llu.\n", + tp_handle); + kfree(ex_tp_info); + } + + return ret; +} + +struct ubcore_ex_tp_info * +ubcore_find_remove_ex_tp_info(struct ubcore_device *dev, uint64_t tp_handle) +{ + struct ubcore_ex_tp_info *ex_tp_info = NULL; + uint32_t hash; + + hash = ubcore_get_ex_tp_hash(&tp_handle); + spin_lock(&dev->ht[UBCORE_HT_EX_TP].lock); + if (dev->ht[UBCORE_HT_EX_TP].head == NULL) { + spin_unlock(&dev->ht[UBCORE_HT_EX_TP].lock); + return NULL; + } + + ex_tp_info = ubcore_hash_table_lookup_nolock(&dev->ht[UBCORE_HT_EX_TP], + hash, &tp_handle); + if (ex_tp_info == NULL) { + spin_unlock(&dev->ht[UBCORE_HT_EX_TP].lock); + ubcore_log_warn("Failed to find ex_tp_info, tp_handle: %llu.\n", + tp_handle); + return NULL; + } + ubcore_hash_table_remove_nolock(&dev->ht[UBCORE_HT_EX_TP], + &ex_tp_info->hnode); + spin_unlock(&dev->ht[UBCORE_HT_EX_TP].lock); + + return ex_tp_info; +} + +static bool ubcore_check_ex_tp_info(struct ubcore_device *dev, + uint64_t tp_handle) +{ + struct ubcore_ex_tp_info *ex_tp_info = NULL; + + ex_tp_info = ubcore_find_remove_ex_tp_info(dev, tp_handle); + if (ex_tp_info == NULL) + return false; + + kfree(ex_tp_info); + return true; +} + +static bool ubcore_is_loopback(struct ubcore_device *dev, + union ubcore_eid *peer_eid) +{ + uint32_t eid_idx; + + spin_lock(&dev->eid_table.lock); + for (eid_idx = 0; eid_idx < dev->eid_table.eid_cnt; eid_idx++) { + if (dev->eid_table.eid_entries[eid_idx].valid && + memcmp(peer_eid, &dev->eid_table.eid_entries[eid_idx].eid, + sizeof(union ubcore_eid)) == 0) { + spin_unlock(&dev->eid_table.lock); + return true; + } + } + spin_unlock(&dev->eid_table.lock); + + return false; +} + +/* free local tp_handle after exchange tp_info error */ +static void ubcore_free_local_tpid(struct ubcore_device *dev, + uint64_t tp_handle, uint32_t tx_psn, + struct ubcore_udata *udata) +{ + struct ubcore_active_tp_cfg active_cfg = { 0 }; + union ubcore_tp_handle local_tp_hdl; + int ret; + + active_cfg.tp_handle.value = tp_handle; + active_cfg.peer_tp_handle.value = tp_handle; + active_cfg.tp_attr.tx_psn = tx_psn; + active_cfg.tp_attr.rx_psn = tx_psn; + + ubcore_log_info("Try to free local_tpid: %u.\n", + (uint32_t)active_cfg.tp_handle.bs.tpid); + ret = 
ubcore_active_tp(dev, &active_cfg); + if (ret != 0) + ubcore_log_err("Failed to active tp, ret: %d.\n", ret); + + local_tp_hdl.value = tp_handle; + ret = ubcore_deactive_tp(dev, local_tp_hdl, udata); + if (ret != 0) + ubcore_log_err("Failed to deactivate tp, ret: %d.\n", ret); +} + +int ubcore_exchange_tp_info(struct ubcore_device *dev, + struct ubcore_get_tp_cfg *cfg, uint64_t tp_handle, + uint32_t tx_psn, uint64_t *peer_tp_handle, + uint32_t *rx_psn, struct ubcore_udata *udata) +{ + struct session_data_create_conn *session_data; + struct msg_create_conn_req req = { 0 }; + struct ubcore_session *session; + int ret; + + if (dev == NULL || cfg == NULL || peer_tp_handle == NULL || + rx_psn == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + if (ubcore_is_loopback(dev, &cfg->peer_eid)) { + *peer_tp_handle = tp_handle; + *rx_psn = tx_psn; + ubcore_log_info("Finish to handle loop back tp: %llu.\n", tp_handle); + return 0; + } + + session = create_session_for_create_connection(dev); + if (session == NULL) { + ubcore_free_local_tpid(dev, tp_handle, tx_psn, udata); + return -ENOMEM; + } + + req.get_tp_cfg = *cfg; + req.tp_handle = tp_handle; + req.tx_psn = tx_psn; + ret = send_create_req(dev, ubcore_session_get_id(session), &req); + if (ret != 0) { + ubcore_log_err("Failed to send create req message"); + ubcore_session_complete(session); + ubcore_session_ref_release(session); + ubcore_free_local_tpid(dev, tp_handle, tx_psn, udata); + return ret; + } + + ubcore_session_wait(session); + session_data = + (struct session_data_create_conn *)ubcore_session_get_data( + session); + ret = session_data->ret; + if (ret != 0) { + ubcore_log_err("Failed to send create req message, ret: %d.\n", + ret); + ubcore_session_ref_release(session); + ubcore_free_local_tpid(dev, tp_handle, tx_psn, udata); + return ret; + } + *peer_tp_handle = session_data->peer_tp_handle; + *rx_psn = session_data->rx_psn; + ubcore_session_ref_release(session); + + ret = ubcore_add_ex_tp_info(dev, tp_handle); + ubcore_log_info("exchange tp_handle is %llu\n", (unsigned long long)tp_handle); + /* The ubcore_add_ex_tp_info result does not affect the ubcore_exchange_tp_info result */ + if (ret != 0) + ubcore_log_err("Failed to add ex tp info, ret: %d.\n", ret); + return 0; +} +EXPORT_SYMBOL(ubcore_exchange_tp_info); + +static void handle_create_req(struct ubcore_device *dev, + struct ubcore_net_msg *msg, void *conn) +{ + struct msg_create_conn_req *req = + (struct msg_create_conn_req *)msg->data; + struct ubcore_get_tp_cfg get_tp_cfg = req->get_tp_cfg; + struct ubcore_active_tp_cfg active_cfg = { 0 }; + struct msg_create_conn_resp resp = { 0 }; + struct ubcore_tp_info tp_info = { 0 }; + uint32_t tp_cnt = 1; + uint64_t tp_handle; + uint32_t tx_psn; + int ret; + + get_tp_cfg.local_eid = req->get_tp_cfg.peer_eid; + get_tp_cfg.peer_eid = req->get_tp_cfg.local_eid; + ret = ubcore_get_tp_list(dev, &get_tp_cfg, &tp_cnt, &tp_info, NULL); + if (ret != 0 || tp_cnt != 1) { + ubcore_log_err("Failed to get tp list, local eid " EID_FMT + ", peer eid " EID_FMT ".\n", + EID_ARGS(get_tp_cfg.local_eid), + EID_ARGS(get_tp_cfg.peer_eid)); + ret = GET_TP_LIST_ERROR; + goto send_resp; + } + ubcore_log_info("Rcv req, local eid " EID_FMT ", peer eid " EID_FMT + ", tp_hdl: %llu, tp_cnt: %u.\n", + EID_ARGS(get_tp_cfg.local_eid), + EID_ARGS(get_tp_cfg.peer_eid), tp_info.tp_handle.value, + tp_info.tp_handle.bs.tp_cnt); + + tp_handle = tp_info.tp_handle.value; + tx_psn = get_random_u32(); + + active_cfg.tp_handle.value = tp_handle; + 
active_cfg.peer_tp_handle.value = req->tp_handle; + active_cfg.tp_attr.rx_psn = req->tx_psn; + active_cfg.tp_attr.tx_psn = tx_psn; + + ret = ubcore_active_tp(dev, &active_cfg); + if (ret != 0) { + ubcore_log_err("Failed to active tp, ret: %d.\n", ret); + ret = ACTIVE_TP_ERROR; + goto send_resp; + } + + resp.tp_handle = tp_handle; + resp.tx_psn = tx_psn; + ret = CREATE_CONN_SUCCESS; + +send_resp: + resp.result = ret; + if (send_create_resp(dev, conn, msg->session_id, &resp) != 0) + ubcore_log_err("Failed to send create resp message.\n"); +} + +static void handle_create_resp(struct ubcore_device *dev, + struct ubcore_net_msg *msg, void *conn) +{ + struct msg_create_conn_resp *resp = + (struct msg_create_conn_resp *)msg->data; + struct ubcore_session *session; + struct session_data_create_conn *session_data; + + session = ubcore_session_find(msg->session_id); + if (session == NULL) { + ubcore_log_err( + "Failed to find session %u on handle create-resp", + msg->session_id); + return; + } + session_data = + (struct session_data_create_conn *)ubcore_session_get_data( + session); + session_data->rx_psn = resp->tx_psn; + session_data->peer_tp_handle = resp->tp_handle; + session_data->ret = resp->result; + ubcore_log_info("Create response result: %d.\n", resp->result); + + ubcore_session_complete(session); + ubcore_session_ref_release(session); +} + +static int send_destroy_req(struct ubcore_device *dev, union ubcore_eid addr, + union ubcore_tp_handle tp_handle) +{ + struct ubcore_net_msg msg = { 0 }; + struct msg_destroy_conn_req req = { 0 }; + int ret; + + req.tp_handle = tp_handle; + + msg.type = UBCORE_NET_DESTROY_REQ; + msg.len = (uint16_t)sizeof(struct msg_destroy_conn_req); + msg.session_id = 0; + msg.data = &req; + + ret = ubcore_net_send_to(dev, &msg, addr); + if (ret != 0) { + ubcore_log_err("Failed to send msg"); + return ret; + } + return 0; +} + +int ubcore_adapter_layer_disconnect(struct ubcore_vtpn *vtpn) +{ + union ubcore_tp_handle peer_tp_handle = + (union ubcore_tp_handle)vtpn->peer_tp_handle; + union ubcore_tp_handle tp_handle = + (union ubcore_tp_handle)vtpn->tp_handle; + union ubcore_eid peer_eid = vtpn->peer_eid; + struct ubcore_device *dev = vtpn->ub_dev; + struct ubcore_udata udata = {0}; + bool ctp = tp_handle.bs.ctp; + int ret; + + if (vtpn->uspace) + ret = ubcore_deactive_tp(dev, tp_handle, &udata); + else + ret = ubcore_deactive_tp(dev, tp_handle, NULL); + if (ret != 0) { + ubcore_log_err("Failed to deactivate tp\n"); + return ret; + } + if (ubcore_is_loopback(dev, &peer_eid)) { + ubcore_log_info( + "Loop-back, tp_handle: %llu,peer_tp_handle: %llu.\n", + vtpn->tp_handle, vtpn->peer_tp_handle); + return 0; + } + if (!ubcore_check_ex_tp_info(dev, vtpn->tp_handle)) { + ubcore_log_info( + "No need to notify destroy request, tp_handle: %llu.\n", + vtpn->tp_handle); + return 0; + } + + /* Only send destroy request for RM/RC TP */ + if ((vtpn->trans_mode == UBCORE_TP_RM || + vtpn->trans_mode == UBCORE_TP_RC) && + !ctp && ubcore_check_ctrlplane_compat(dev->ops->import_jetty)) { + ret = send_destroy_req(dev, peer_eid, peer_tp_handle); + if (ret != 0) + ubcore_log_err("Failed to send destroy req message"); + } + + return 0; +} + +static void handle_destroy_req(struct ubcore_device *dev, + struct ubcore_net_msg *msg, void *conn) +{ + struct msg_destroy_conn_req *req = + (struct msg_destroy_conn_req *)msg->data; + int ret; + + /* The target tp_handle is obtained from kernel space */ + ret = ubcore_deactive_tp(dev, req->tp_handle, NULL); + if (ret != 0) + ubcore_log_err("Failed to deactivate 
tp"); +} + +/* Only for impoprt_jetty/jfr, thus only for RM/UM */ +static int ubcore_fill_get_tp_cfg(struct ubcore_device *dev, + struct ubcore_get_tp_cfg *get_tp_cfg, + struct ubcore_tjetty_cfg *cfg) +{ + uint32_t eid_index = cfg->eid_index; + + if (cfg->tp_type == UBCORE_CTP) + get_tp_cfg->flag.bs.ctp = 1; + else if (cfg->tp_type == UBCORE_RTP) + get_tp_cfg->flag.bs.rtp = 1; + else + get_tp_cfg->flag.bs.utp = 1; + + get_tp_cfg->trans_mode = cfg->trans_mode; + + spin_lock(&dev->eid_table.lock); + if (eid_index >= dev->eid_table.eid_cnt || + dev->eid_table.eid_entries == NULL || + dev->eid_table.eid_entries[eid_index].valid == false) { + spin_unlock(&dev->eid_table.lock); + ubcore_log_err("Invalid parameter, eid_index: %u.\n", + eid_index); + return -EINVAL; + } + get_tp_cfg->local_eid = dev->eid_table.eid_entries[eid_index].eid; + spin_unlock(&dev->eid_table.lock); + get_tp_cfg->peer_eid = cfg->id.eid; + + return 0; +} + +struct ubcore_tjetty *ubcore_import_jfr_compat(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_active_tp_cfg active_tp_cfg = { 0 }; + struct ubcore_get_tp_cfg get_tp_cfg = { 0 }; + struct ubcore_tp_info tp_list = { 0 }; + struct ubcore_tjetty *tjfr = NULL; + uint32_t tp_cnt = 1; + int ret; + + if (cfg->trans_mode != UBCORE_TP_RM && + cfg->trans_mode != UBCORE_TP_UM) { + ubcore_log_err("Invalid trans_mode %d.\n", + (int)cfg->trans_mode); + return ERR_PTR(-EINVAL); + } + + if (ubcore_fill_get_tp_cfg(dev, &get_tp_cfg, cfg) != 0) + return NULL; + + ret = ubcore_get_tp_list(dev, &get_tp_cfg, &tp_cnt, &tp_list, NULL); + if (ret != 0 || tp_cnt != 1) { + ubcore_log_err("Failed to get tp list, ret: %d, tp_cnt: %u.\n", + ret, tp_cnt); + return NULL; + } + + active_tp_cfg.tp_handle = tp_list.tp_handle; + + if (cfg->trans_mode == UBCORE_TP_RM && + cfg->tp_type == UBCORE_RTP) { + active_tp_cfg.tp_attr.tx_psn = get_random_u32(); + ret = ubcore_exchange_tp_info( + dev, &get_tp_cfg, tp_list.tp_handle.value, + active_tp_cfg.tp_attr.tx_psn, + &active_tp_cfg.peer_tp_handle.value, + &active_tp_cfg.tp_attr.rx_psn, udata); + if (ret != 0) { + ubcore_log_err("Failed to exchange tp info, ret: %d.\n", + ret); + return NULL; + } + ubcore_log_info("Finish to exchange tp info, local eid " EID_FMT + ", peer eid " EID_FMT ".\n", + EID_ARGS(get_tp_cfg.local_eid), + EID_ARGS(get_tp_cfg.peer_eid)); + } + + tjfr = ubcore_import_jfr_ex(dev, cfg, &active_tp_cfg, udata); + if (IS_ERR_OR_NULL(tjfr)) + ubcore_log_err("Failed to import jfr ex.\n"); + + return tjfr; +} + +struct ubcore_tjetty *ubcore_import_jetty_compat(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_active_tp_cfg active_tp_cfg = { 0 }; + struct ubcore_get_tp_cfg get_tp_cfg = { 0 }; + struct ubcore_tp_info tp_list = { 0 }; + struct ubcore_tjetty *tjetty = NULL; + uint32_t tp_cnt = 1; + int ret; + + if (cfg->trans_mode == UBCORE_TP_RM || + cfg->trans_mode == UBCORE_TP_UM) { + if (ubcore_fill_get_tp_cfg(dev, &get_tp_cfg, cfg) != 0) + return NULL; + + ret = ubcore_get_tp_list(dev, &get_tp_cfg, &tp_cnt, &tp_list, + NULL); + if (ret != 0 || tp_cnt != 1) { + ubcore_log_err( + "Failed to get tp list, ret: %d, tp_cnt: %u.\n", + ret, tp_cnt); + return NULL; + } + + active_tp_cfg.tp_handle = tp_list.tp_handle; + + if (cfg->trans_mode == UBCORE_TP_RM && + cfg->tp_type == UBCORE_RTP) { + active_tp_cfg.tp_attr.tx_psn = get_random_u32(); + ret = ubcore_exchange_tp_info( + dev, &get_tp_cfg, tp_list.tp_handle.value, + 
active_tp_cfg.tp_attr.tx_psn, + &active_tp_cfg.peer_tp_handle.value, + &active_tp_cfg.tp_attr.rx_psn, udata); + if (ret != 0) { + ubcore_log_err( + "Failed to exchange tp info, ret: %d.\n", + ret); + return NULL; + } + ubcore_log_info( + "Finish to exchange tp info, local eid " EID_FMT + ", peer eid " EID_FMT ".\n", + EID_ARGS(get_tp_cfg.local_eid), + EID_ARGS(get_tp_cfg.peer_eid)); + } + } + + tjetty = ubcore_import_jetty_ex(dev, cfg, &active_tp_cfg, udata); + if (IS_ERR_OR_NULL(tjetty)) + ubcore_log_err("Failed to import jetty ex.\n"); + + return tjetty; +} + +int ubcore_bind_jetty_compat(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata) +{ + struct ubcore_active_tp_cfg active_tp_cfg = { 0 }; + struct ubcore_get_tp_cfg get_tp_cfg = { 0 }; + struct ubcore_device *dev = jetty->ub_dev; + struct ubcore_tp_info tp_list = { 0 }; + uint32_t tp_cnt = 1; + int ret; + + ret = ubcore_fill_get_tp_cfg(dev, &get_tp_cfg, &tjetty->cfg); + if (ret != 0) + return ret; + + ret = ubcore_get_tp_list(dev, &get_tp_cfg, &tp_cnt, &tp_list, NULL); + if (ret != 0 || tp_cnt != 1) { + ubcore_log_err("Failed to get tp list, ret: %d, tp_cnt: %u.\n", + ret, tp_cnt); + return ret; + } + + active_tp_cfg.tp_handle = tp_list.tp_handle; + active_tp_cfg.tp_attr.tx_psn = get_random_u32(); + + if (tjetty->cfg.tp_type == UBCORE_RTP) { + ret = ubcore_exchange_tp_info(dev, &get_tp_cfg, + tp_list.tp_handle.value, + active_tp_cfg.tp_attr.tx_psn, + &active_tp_cfg.peer_tp_handle.value, + &active_tp_cfg.tp_attr.rx_psn, udata); + if (ret != 0) { + ubcore_log_err("Failed to exchange tp info, ret: %d.\n", ret); + return ret; + } + ubcore_log_info("Finish to exchange tp info, local eid " EID_FMT + ", peer eid " EID_FMT ".\n", + EID_ARGS(get_tp_cfg.local_eid), + EID_ARGS(get_tp_cfg.peer_eid)); + } + + ret = ubcore_bind_jetty_ex(jetty, tjetty, &active_tp_cfg, udata); + if (ret != 0) + ubcore_log_err("Failed to bind jetty ex, ret: %d.\n", ret); + + return ret; +} + +void ubcore_exchange_init(void) +{ + ubcore_net_register_msg_handler(UBCORE_NET_CREATE_REQ, + handle_create_req, + sizeof(struct msg_create_conn_req)); + ubcore_net_register_msg_handler(UBCORE_NET_CREATE_RESP, + handle_create_resp, + sizeof(struct msg_create_conn_resp)); + ubcore_net_register_msg_handler(UBCORE_NET_DESTROY_REQ, + handle_destroy_req, + sizeof(struct msg_destroy_conn_req)); +} diff --git a/drivers/ub/urma/ubcore/ubcore_connect_adapter.h b/drivers/ub/urma/ubcore/ubcore_connect_adapter.h new file mode 100644 index 000000000000..f41f46463870 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_connect_adapter.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore connect adapter header file + * Author: Wang Hang + * Create: 2025-06-19 + * Note: + * History: 2025-06-19: create file + */ + +#ifndef UBCORE_CONNECT_ADAPTER_H +#define UBCORE_CONNECT_ADAPTER_H + +#include + +struct ubcore_ex_tp_info { + struct hlist_node hnode; /* key: tp_handle */ + uint64_t tp_handle; + struct kref ref_cnt; +}; + +struct ubcore_tjetty *ubcore_import_jfr_compat(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); + +struct ubcore_tjetty *ubcore_import_jetty_compat(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); + +int ubcore_bind_jetty_compat(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata); + +int ubcore_adapter_layer_disconnect(struct ubcore_vtpn *vtpn); + +void ubcore_exchange_init(void); + +static inline bool ubcore_check_ctrlplane_compat(void *op_ptr) +{ + return (op_ptr == NULL); +} + +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_connect_bonding.c b/drivers/ub/urma/ubcore/ubcore_connect_bonding.c new file mode 100644 index 000000000000..d88f7d24643f --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_connect_bonding.c @@ -0,0 +1,450 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore connect bonding implementation file + * Author: Wang Hang + * Create: 2025-08-07 + * Note: + * History: 2025-08-07: create file + */ + +#include "ubcore_connect_bonding.h" + +#include "net/ubcore_net.h" +#include "ubcore_priv.h" +#include "ubcore_topo_info.h" +#include + +#define BONDING_UDATA_BUF_LEN 960 + +struct session_data_exchange_udata { + int *result; + char *udata_out; + uint32_t udata_out_size; +}; + +struct msg_seg_info_req { + struct ubcore_ubva ubva; + uint64_t len; + uint32_t token_id; +}; + +struct msg_jetty_info_req { + struct ubcore_jetty_id jetty_id; + bool is_jfr; +}; + +struct msg_seg_info_resp { + int result; + char seg_info[BONDING_UDATA_BUF_LEN]; +}; + +struct msg_jetty_info_resp { + int result; + char jetty_info[BONDING_UDATA_BUF_LEN]; +}; + +static struct ubcore_device *ubcore_find_physical_device(void) +{ + struct ubcore_topo_map *topo_map; + struct ubcore_topo_info *topo_info; + union ubcore_eid *primary_eid; + + topo_map = ubcore_get_global_topo_map(); + if (topo_map == NULL) { + ubcore_log_err("Failed get global topo map"); + return NULL; + } + + topo_info = ubcore_get_cur_topo_info(topo_map); + if (topo_info == NULL) { + ubcore_log_err("Failed get global topo info"); + return NULL; + } + + primary_eid = (union ubcore_eid *)topo_info->io_die_info[0].primary_eid; + return ubcore_find_device(primary_eid, UBCORE_TRANSPORT_UB); +} + +static struct ubcore_device *ubcore_find_bonding_device(void) +{ + struct ubcore_topo_map *topo_map; + struct ubcore_topo_info *topo_info; + union ubcore_eid *bonding_eid; + + topo_map = ubcore_get_global_topo_map(); + if (topo_map == NULL) { + ubcore_log_err("Failed get global topo map"); + return NULL; + } + + topo_info = ubcore_get_cur_topo_info(topo_map); + if (topo_info == NULL) { + ubcore_log_err("Failed get global topo info"); + return NULL; + } + + bonding_eid = (union ubcore_eid *)topo_info->bonding_eid; + return ubcore_find_device(bonding_eid, UBCORE_TRANSPORT_UB); +} + +static struct ubcore_session * +create_session_for_exchange_udata(struct ubcore_device *dev, + int *result, char *udata_out, uint32_t udata_out_size) +{ + struct ubcore_session *session; + struct session_data_exchange_udata *session_data; + + session_data = + kzalloc(sizeof(struct session_data_exchange_udata), GFP_KERNEL); + if (IS_ERR_OR_NULL(session_data)) { + ubcore_log_err("Failed to alloc exchange seg info user arg"); + return NULL; + } + session_data->result = result; + session_data->udata_out = udata_out; + session_data->udata_out_size = udata_out_size; + + session = ubcore_session_create(dev, session_data, 0, NULL, NULL); + if (session == NULL) { + ubcore_log_err("Failed to alloc session for exchange seg info"); + kfree(session_data); + return NULL; + } + + return session; +} + +static int send_seg_info_req(struct ubcore_device *dev, uint32_t session_id, + struct msg_seg_info_req *req) +{ + struct ubcore_net_msg msg = { 0 }; + union ubcore_eid dest_eid = { 0 }; + int ret; + + msg.type = UBCORE_NET_BONDING_SEG_INFO_REQ; + msg.len = sizeof(struct msg_seg_info_req); + msg.session_id = session_id; + msg.data = req; + + ret = ubcore_get_primary_eid_by_bonding_eid(&req->ubva.eid, &dest_eid); + if (ret != 0) + return ret; + + ubcore_log_info("Send seg info req to " EID_FMT "\n", + EID_ARGS(dest_eid)); + ret = ubcore_net_send_to(dev, &msg, dest_eid); + if (ret != 0) { + ubcore_log_err("Failed to send msg"); + return ret; + } + return 0; +} + +static int send_seg_info_resp(struct ubcore_device *dev, void *conn, + uint32_t 
session_id, + struct msg_seg_info_resp *resp) +{ + struct ubcore_net_msg msg = { 0 }; + int ret; + + msg.type = UBCORE_NET_BONDING_SEG_INFO_RESP; + msg.len = sizeof(struct msg_seg_info_resp); + msg.session_id = session_id; + msg.data = resp; + + ret = ubcore_net_send(dev, &msg, conn); + if (ret != 0) { + ubcore_log_err("Failed to send msg"); + return ret; + } + return 0; +} + +static int send_jetty_info_req(struct ubcore_device *dev, uint32_t session_id, + struct msg_jetty_info_req *req) +{ + struct ubcore_net_msg msg = { 0 }; + union ubcore_eid dest_eid = { 0 }; + int ret; + + msg.type = UBCORE_NET_BONDING_JETTY_INFO_REQ; + msg.len = sizeof(struct msg_jetty_info_req); + msg.session_id = session_id; + msg.data = req; + + ret = ubcore_get_primary_eid_by_bonding_eid(&req->jetty_id.eid, + &dest_eid); + if (ret != 0) + return ret; + + ubcore_log_info("Send jetty info req to " EID_FMT "\n", + EID_ARGS(dest_eid)); + ret = ubcore_net_send_to(dev, &msg, dest_eid); + if (ret != 0) { + ubcore_log_err("Failed to send msg"); + return ret; + } + return 0; +} + +static int send_jetty_info_resp(struct ubcore_device *dev, void *conn, + uint32_t session_id, + struct msg_jetty_info_resp *resp) +{ + struct ubcore_net_msg msg = { 0 }; + int ret; + + msg.type = UBCORE_NET_BONDING_JETTY_INFO_RESP; + msg.len = sizeof(struct msg_jetty_info_resp); + msg.session_id = session_id; + msg.data = resp; + + ret = ubcore_net_send(dev, &msg, conn); + if (ret != 0) { + ubcore_log_err("Failed to send msg"); + return ret; + } + return 0; +} + +int ubcore_connect_exchange_udata_when_import_seg(struct ubcore_seg *seg, + struct ubcore_udata *udata) +{ + struct ubcore_device *physical_dev = ubcore_find_physical_device(); + struct msg_seg_info_req req = { 0 }; + struct ubcore_session *session; + char buf[BONDING_UDATA_BUF_LEN]; + int ret, result = -1; + + if (physical_dev == NULL) { + ubcore_log_err("Failed find physical device"); + return -1; + } + + session = create_session_for_exchange_udata(physical_dev, &result, buf, + sizeof(buf)); + if (session == NULL) { + ret = -ENOMEM; + goto put_device; + } + + req.ubva = seg->ubva; + req.len = seg->len; + req.token_id = seg->token_id; + ret = send_seg_info_req(physical_dev, ubcore_session_get_id(session), + &req); + if (ret != 0) { + ubcore_log_err("Failed to send create req message"); + ubcore_session_complete(session); + goto release_session; + } + ubcore_session_wait(session); + + if (result != 0) { + ubcore_log_err("Failed to exchange udata, ret: %d.\n", result); + ret = result; + goto release_session; + } + + ret = copy_to_user((void __user *)udata->udrv_data->out_addr, buf, + udata->udrv_data->out_len); + if (ret != 0) { + ubcore_log_err("Failed to copy to user, ret: %d.\n", ret); + goto release_session; + } + + ubcore_session_ref_release(session); + ubcore_put_device(physical_dev); + return 0; + +release_session: + ubcore_session_ref_release(session); +put_device: + ubcore_put_device(physical_dev); + return ret; +} + +int ubcore_connect_exchange_udata_when_import_jetty( + struct ubcore_tjetty_cfg *cfg, struct ubcore_udata *udata, bool is_jfr) +{ + struct ubcore_device *physical_dev = ubcore_find_physical_device(); + struct msg_jetty_info_req req = { 0 }; + struct ubcore_session *session; + char buf[BONDING_UDATA_BUF_LEN]; + int ret, result = -1; + + if (physical_dev == NULL) { + ubcore_log_err("Failed find physical device"); + return -1; + } + + session = create_session_for_exchange_udata(physical_dev, &result, buf, + sizeof(buf)); + if (session == NULL) { + ret = -ENOMEM; + 
goto put_device; + } + + req.is_jfr = is_jfr; + req.jetty_id = cfg->id; + ret = send_jetty_info_req(physical_dev, ubcore_session_get_id(session), + &req); + if (ret != 0) { + ubcore_log_err("Failed to send create req message"); + ubcore_session_complete(session); + goto release_session; + } + ubcore_session_wait(session); + + if (result != 0) { + ubcore_log_err("Failed to exchange udata, ret: %d.\n", result); + ret = result; + goto release_session; + } + + ret = copy_to_user((void __user *)udata->udrv_data->out_addr, buf, + udata->udrv_data->out_len); + if (ret != 0) { + ubcore_log_err("Failed to copy to user, ret: %d.\n", ret); + goto release_session; + } + + ubcore_session_ref_release(session); + ubcore_put_device(physical_dev); + return 0; + +release_session: + ubcore_session_ref_release(session); +put_device: + ubcore_put_device(physical_dev); + return ret; +} + +static void handle_seg_info_req(struct ubcore_device *dev, + struct ubcore_net_msg *msg, void *conn) +{ + struct msg_seg_info_req *req = (struct msg_seg_info_req *)msg->data; + struct ubcore_device *bonding_dev = ubcore_find_bonding_device(); + int ret = 0; + + struct msg_seg_info_resp resp = { 0 }; + struct ubcore_user_ctl k_user_ctl = { + .in.opcode = 5, + .in.addr = (uint64_t)req, + .in.len = sizeof(*req), + .out.addr = (uint64_t)(&resp.seg_info), + .out.len = sizeof(resp.seg_info), + }; + + ret = ubcore_user_control(bonding_dev, &k_user_ctl); + if (ret != 0) + ubcore_log_err("Failed to get seg info by user ctl"); + + resp.result = ret; + if (send_seg_info_resp(dev, conn, msg->session_id, &resp) != 0) + ubcore_log_err("Failed to send create resp message.\n"); +} + +static void handle_jetty_info_req(struct ubcore_device *dev, + struct ubcore_net_msg *msg, void *conn) +{ + struct msg_jetty_info_req *req = (struct msg_jetty_info_req *)msg->data; + struct ubcore_device *bonding_dev = ubcore_find_bonding_device(); + int ret = 0; + + struct msg_jetty_info_resp resp = { 0 }; + struct ubcore_user_ctl k_user_ctl = { + .in.opcode = 6, + .in.addr = (uint64_t)req, + .in.len = sizeof(*req), + .out.addr = (uint64_t)(&resp.jetty_info), + .out.len = sizeof(resp.jetty_info), + }; + + ret = ubcore_user_control(bonding_dev, &k_user_ctl); + if (ret != 0) + ubcore_log_err("Failed to get jetty info by user ctl"); + + resp.result = ret; + if (send_jetty_info_resp(dev, conn, msg->session_id, &resp) != 0) + ubcore_log_err("Failed to send create resp message.\n"); +} + +static void handle_exchange_udata_resp(struct ubcore_device *dev, void *conn, + uint32_t session_id, int result, + void *data) +{ + struct ubcore_session *session; + struct session_data_exchange_udata *session_data; + + session = ubcore_session_find(session_id); + if (session == NULL) { + ubcore_log_err( + "Failed to find session %u on handle bonding-seg-info-req", + session_id); + return; + } + session_data = + (struct session_data_exchange_udata *)ubcore_session_get_data( + session); + + if (result != 0) { + *session_data->result = result; + ubcore_log_err("Failed to exchange udata, ret: %d.\n", result); + goto complete_session; + } + + memcpy(session_data->udata_out, data, session_data->udata_out_size); + *session_data->result = 0; + ubcore_log_info("Create response result: %d.\n", result); + +complete_session: + ubcore_session_complete(session); + ubcore_session_ref_release(session); +} + +static void handle_seg_info_resp(struct ubcore_device *dev, + struct ubcore_net_msg *msg, void *conn) +{ + struct msg_seg_info_resp *resp = (struct msg_seg_info_resp *)msg->data; + + 
handle_exchange_udata_resp(dev, conn, msg->session_id, resp->result, + resp->seg_info); +} + +static void handle_jetty_info_resp(struct ubcore_device *dev, + struct ubcore_net_msg *msg, void *conn) +{ + struct msg_jetty_info_resp *resp = + (struct msg_jetty_info_resp *)msg->data; + + handle_exchange_udata_resp(dev, conn, msg->session_id, resp->result, + &resp->jetty_info); +} + +void ubcore_connect_bonding_init(void) +{ + ubcore_net_register_msg_handler(UBCORE_NET_BONDING_SEG_INFO_REQ, + handle_seg_info_req, + sizeof(struct msg_seg_info_req)); + ubcore_net_register_msg_handler(UBCORE_NET_BONDING_SEG_INFO_RESP, + handle_seg_info_resp, + sizeof(struct msg_seg_info_resp)); + ubcore_net_register_msg_handler(UBCORE_NET_BONDING_JETTY_INFO_REQ, + handle_jetty_info_req, + sizeof(struct msg_jetty_info_req)); + ubcore_net_register_msg_handler(UBCORE_NET_BONDING_JETTY_INFO_RESP, + handle_jetty_info_resp, + sizeof(struct msg_jetty_info_resp)); +} diff --git a/drivers/ub/urma/ubcore/ubcore_connect_bonding.h b/drivers/ub/urma/ubcore/ubcore_connect_bonding.h new file mode 100644 index 000000000000..352bde87fd1c --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_connect_bonding.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore connect bonding header file + * Author: Wang Hang + * Create: 2025-08-07 + * Note: + * History: 2025-08-07: create file + */ + +#ifndef UBCORE_CONNECT_BONDING_H +#define UBCORE_CONNECT_BONDING_H + +#include + +#define UBAGG_DEV_PREFIX "bonding_dev" + +static inline bool ubcore_is_bonding_dev(struct ubcore_device *dev) +{ + return memcmp(dev->dev_name, UBAGG_DEV_PREFIX, + strlen(UBAGG_DEV_PREFIX)) == 0; +} + +int ubcore_connect_exchange_udata_when_import_seg(struct ubcore_seg *seg, + struct ubcore_udata *udata); + +int ubcore_connect_exchange_udata_when_import_jetty( + struct ubcore_tjetty_cfg *cfg, struct ubcore_udata *udata, bool is_jfr); + +void ubcore_connect_bonding_init(void); + +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_ctp.c b/drivers/ub/urma/ubcore/ubcore_ctp.c new file mode 100644 index 000000000000..858f83459a07 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_ctp.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore ctp implementation + * Author: Xu Zhicong + * Create: 2023-10-12 + * Note: + * History: 2023-10-12: Create file + */ + +#include +#include "ubcore_log.h" +#include "ubcore_hash_table.h" +#include "ubcore_ctp.h" + +struct ubcore_ctp *ubcore_create_ctp(struct ubcore_device *dev, + struct ubcore_ctp_cfg *cfg) +{ + struct ubcore_ctp *ctp; + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->create_ctp == NULL) + return NULL; + + ctp = dev->ops->create_ctp(dev, cfg, NULL); + if (ctp == NULL) { + ubcore_log_err("Failed to create ctp"); + return NULL; + } + ctp->ub_dev = dev; + ctp->ctp_cfg = *cfg; + atomic_set(&ctp->use_cnt, 0); + kref_init(&ctp->ref_cnt); + init_completion(&ctp->comp); + + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_CTP], &ctp->hnode, + ctp->ctpn); + if (ret != 0) { + (void)dev->ops->destroy_ctp(ctp); + ctp = NULL; + ubcore_log_err("Failed to add ctp to the ctp table"); + return ctp; + } + + ubcore_log_info("Success to create ctp, ctp_idx %u", ctp->ctpn); + return ctp; +} + +static void ubcore_ctp_kref_release(struct kref *ref_cnt) +{ + struct ubcore_ctp *ctp = + container_of(ref_cnt, struct ubcore_ctp, ref_cnt); + + complete(&ctp->comp); +} + +void ubcore_ctp_kref_put(struct ubcore_ctp *ctp) +{ + (void)kref_put(&ctp->ref_cnt, ubcore_ctp_kref_release); +} + +void ubcore_ctp_get(void *obj) +{ + struct ubcore_ctp *ctp = obj; + + kref_get(&ctp->ref_cnt); +} + +int ubcore_destroy_ctp(struct ubcore_ctp *ctp) +{ + struct ubcore_device *dev = ctp->ub_dev; + uint32_t ctp_idx = ctp->ctpn; + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->destroy_ctp == NULL) + return -EINVAL; + + ubcore_ctp_kref_put(ctp); + wait_for_completion(&ctp->comp); + ret = dev->ops->destroy_ctp(ctp); + if (ret != 0) { + (void)ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_CTP], + &ctp->hnode, ctp->ctpn); + /* inc ctp use cnt? */ + ubcore_log_err("Failed to destroy ctp"); + return ret; + } + + ubcore_log_info("Success to destroy ctp, ctp_idx %u", ctp_idx); + return ret; +} + +struct ubcore_ctp *ubcore_find_ctp(struct ubcore_device *dev, uint32_t idx) +{ + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_CTP], idx, &idx); +} + +struct ubcore_ctp *ubcore_find_get_ctp(struct ubcore_device *dev, uint32_t idx) +{ + return ubcore_hash_table_lookup_get(&dev->ht[UBCORE_HT_CTP], idx, &idx); +} + +struct ubcore_ctp *ubcore_find_remove_ctp(struct ubcore_device *dev, + uint32_t idx) +{ + struct ubcore_ctp *ctp; + + spin_lock(&dev->ht[UBCORE_HT_CTP].lock); + if (dev->ht[UBCORE_HT_CTP].head == NULL) { + spin_unlock(&dev->ht[UBCORE_HT_CTP].lock); + return NULL; + } + ctp = ubcore_hash_table_lookup_nolock(&dev->ht[UBCORE_HT_CTP], idx, + &idx); + if (ctp == NULL) { + spin_unlock(&dev->ht[UBCORE_HT_CTP].lock); + return NULL; + } + if (atomic_read(&ctp->use_cnt) > 0) { + spin_unlock(&dev->ht[UBCORE_HT_CTP].lock); + ubcore_log_err("Failed to remove ctp"); + return NULL; + } + ubcore_hash_table_remove_nolock(&dev->ht[UBCORE_HT_CTP], &ctp->hnode); + spin_unlock(&dev->ht[UBCORE_HT_CTP].lock); + + return ctp; +} diff --git a/drivers/ub/urma/ubcore/ubcore_ctp.h b/drivers/ub/urma/ubcore/ubcore_ctp.h new file mode 100644 index 000000000000..caef08062f0b --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_ctp.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore ctp header + * Author: Xu Zhicong + * Create: 2023-10-12 + * Note: + * History: 2023-10-12: Create file + */ +#ifndef UBCORE_CTP_H +#define UBCORE_CTP_H + +#include + +struct ubcore_ctp *ubcore_create_ctp(struct ubcore_device *dev, + struct ubcore_ctp_cfg *cfg); +int ubcore_destroy_ctp(struct ubcore_ctp *ctp); +struct ubcore_ctp *ubcore_find_ctp(struct ubcore_device *dev, uint32_t idx); +struct ubcore_ctp *ubcore_find_get_ctp(struct ubcore_device *dev, uint32_t idx); +struct ubcore_ctp *ubcore_find_remove_ctp(struct ubcore_device *dev, + uint32_t idx); +void ubcore_ctp_get(void *obj); +void ubcore_ctp_kref_put(struct ubcore_ctp *ctp); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_device.c b/drivers/ub/urma/ubcore/ubcore_device.c new file mode 100644 index 000000000000..36706b1553e5 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_device.c @@ -0,0 +1,3191 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore device add and remove ops file + * Author: Qian Guoxin + * Create: 2021-08-03 + * Note: + * History: 2021-08-03: create file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "net/ubcore_session.h" +#include +#include "ubcore_log.h" +#include +#include +#include "ubcore_hash_table.h" +#include "ubcore_tp.h" +#include "ubcore_tp_table.h" +#include +#include "ubcore_msg.h" +#include "ubcore_netdev.h" +#include "ubcore_utp.h" +#include "ubcore_vtp.h" +#include "ubcore_tpg.h" +#include "ubcore_ctp.h" +#include "ubcore_netlink.h" +#include "ubcore_workqueue.h" +#include "ubcore_cdev_file.h" +#include "ubcore_uvs_cmd.h" +#include "ubcore_cmd.h" +#include "ubcore_uvs.h" +#include "ubcore_main.h" +#include "ubcore_connect_adapter.h" +#include "net/ubcore_cm.h" +#include "ubcore_device.h" + +#define UBCORE_DEVICE_NAME "ubcore" + +struct ubcore_ctx { + dev_t ubcore_devno; + struct cdev ubcore_cdev; + struct device *ubcore_dev; +}; + +static LIST_HEAD(g_mue_cdev_list); +static DECLARE_RWSEM(g_mue_cdev_rwsem); + +static dev_t g_dynamic_mue_devnum; + +static LIST_HEAD(g_client_list); +static LIST_HEAD(g_device_list); + +/* + * g_device_rwsem and g_lists_rwsem protect both g_device_list and g_client_list. + * g_device_rwsem protects writer access by device and client + * g_lists_rwsem protects reader access to these lists. + * Iterators of these lists must lock it for read, while updates + * to the lists must be done with a write lock. 
+ */ +static DECLARE_RWSEM(g_device_rwsem); + +/* + * g_clients_rwsem protect g_client_list. + */ +static DECLARE_RWSEM(g_clients_rwsem); +static struct ubcore_device *g_ub_mue; +static DEFINE_MUTEX(g_upi_lock); +static LIST_HEAD(g_upi_list); + +static unsigned int g_ubcore_net_id; +static LIST_HEAD(g_ubcore_net_list); +static DEFINE_SPINLOCK(g_ubcore_net_lock); +static DECLARE_RWSEM(g_ubcore_net_rwsem); + +static bool g_shared_ns = true; + +static struct ubcore_ctx g_ubcore_ctx = { 0 }; + +void ubcore_uvs_release_sip_list(struct ubcore_uvs_instance *uvs) +{ + struct sip_idx_node *cur, *nex; + + list_for_each_entry_safe(cur, nex, &uvs->sip_list, node) { + ubcore_delete_sip(cur->sip_info); + } + ubcore_log_info("sip list is empty\n"); +} + +static void ubcore_global_release_file(struct kref *ref) +{ + struct ubcore_global_file *file; + + ubcore_log_info("release ubcore global file.\n"); + file = container_of(ref, struct ubcore_global_file, ref); + if (file->uvs != NULL) { + ubcore_uvs_release_sip_list(file->uvs); + ubcore_uvs_release_nl_buffer(file->uvs); + ubcore_uvs_kref_put(file->uvs); + file->uvs = NULL; + } + + kfree(file); +} + +static int ubcore_global_open(struct inode *i_node, struct file *filp) +{ + struct ubcore_global_file *file; + + file = kzalloc(sizeof(struct ubcore_global_file), GFP_KERNEL); + if (file == NULL) + return -ENOMEM; + + kref_init(&file->ref); + filp->private_data = file; + + ubcore_log_info("open ubcore global file succeed.\n"); + return 0; +} + +static long ubcore_global_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct ubcore_cmd_hdr hdr; + struct ubcore_global_file *file; + int ret; + + if (filp == NULL || filp->private_data == NULL) { + ubcore_log_err("invalid param"); + return -EINVAL; + } + + file = filp->private_data; + + if (cmd == UBCORE_UVS_CMD) { + ret = ubcore_copy_from_user(&hdr, (void *)arg, + sizeof(struct ubcore_cmd_hdr)); + if ((ret != 0) || (hdr.args_len > UBCORE_MAX_CMD_SIZE)) { + ubcore_log_err( + "length of ioctl input parameter is out of range.\n"); + return -EINVAL; + } + + if ((hdr.args_len == 0) || (hdr.args_addr == 0)) { + ubcore_log_err( + "hdr args len and args addr can't be 0.\n"); + return -EINVAL; + } + + kref_get(&file->ref); + ret = ubcore_uvs_global_cmd_parse(file, &hdr); + kref_put(&file->ref, ubcore_global_release_file); + return ret; + } + + ubcore_log_err("bad ioctl command.\n"); + return -ENOIOCTLCMD; +} + +static int ubcore_global_close(struct inode *i_node, struct file *filp) +{ + struct ubcore_global_file *file = filp->private_data; + struct ubcore_uvs_instance *ins; + + ubcore_log_info("closing ubcore global device.\n"); + if (file->uvs != NULL) { + ins = file->uvs; + ins->state = UBCORE_UVS_STATE_DEAD; + ubcore_log_info( + "set uvs %s with id %u to dead in ubcore global close\n", + ins->name, ins->id); + } + + kref_put(&file->ref, ubcore_global_release_file); + + return 0; +} + +static const struct file_operations g_ubcore_global_ops = { + .owner = THIS_MODULE, + .open = ubcore_global_open, + .release = ubcore_global_close, + .unlocked_ioctl = ubcore_global_ioctl, + .compat_ioctl = ubcore_global_ioctl, +}; + +static const void *ubcore_net_namespace(const struct device *dev) + +{ + struct ubcore_logic_device *ldev = dev_get_drvdata(dev); + struct ubcore_device *ubc_dev; + + if (ldev == NULL || ldev->ub_dev == NULL) { + ubcore_log_info("init net %pK", ldev); + return &init_net; + } + + ubc_dev = ldev->ub_dev; + if (ubc_dev->transport_type == UBCORE_TRANSPORT_UB) + return read_pnet(&ldev->net); + 
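For orientation only, a hedged user-space sketch of how the UBCORE_UVS_CMD path above is driven. It assumes the uapi header exporting struct ubcore_cmd_hdr and the UBCORE_UVS_CMD ioctl number (ubcore_cmd.h in this patch) is available to user space; the payload buffer is a placeholder and the header may carry additional fields (such as a sub-command id) not shown here.

/* Hypothetical user-space caller; error handling trimmed for brevity. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int uvs_cmd_sketch(void)
{
	struct ubcore_cmd_hdr hdr = { 0 };
	uint8_t payload[64] = { 0 };	/* placeholder command payload */
	int fd, ret;

	fd = open("/dev/ubcore", O_RDWR);
	if (fd < 0)
		return -1;

	/* must be non-zero and no larger than UBCORE_MAX_CMD_SIZE */
	hdr.args_len = sizeof(payload);
	hdr.args_addr = (uint64_t)(uintptr_t)payload;

	ret = ioctl(fd, UBCORE_UVS_CMD, &hdr);
	close(fd);
	return ret;
}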
else + return &init_net; +} + +static char *ubcore_devnode(const struct device *dev, umode_t *mode) + +{ + if (mode) + *mode = UBCORE_DEVNODE_MODE; + + return kasprintf(GFP_KERNEL, "ubcore/%s", dev_name(dev)); +} + +static struct class g_ubcore_class = { .name = "ubcore", + .devnode = ubcore_devnode, + .ns_type = &net_ns_type_operations, + .namespace = ubcore_net_namespace }; + +struct ubcore_net { + possible_net_t net; + struct list_head node; +}; + +struct ubcore_upi_entry { + struct ubcore_device *dev; + uint32_t upi; + struct list_head node; +}; + +struct ubcore_event_work { + struct work_struct work; + struct ubcore_event event; +}; + +void ubcore_set_client_ctx_data(struct ubcore_device *dev, + struct ubcore_client *client, void *data) +{ + struct ubcore_client_ctx *ctx; + + if (dev == NULL || client == NULL || client->client_name == NULL || + strnlen(dev->dev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME || + strnlen(client->client_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("dev or client is null"); + return; + } + + down_read(&dev->client_ctx_rwsem); + list_for_each_entry(ctx, &dev->client_ctx_list, list_node) { + if (ctx->client == client) { + ctx->data = data; + goto out; + } + } + ubcore_log_err( + "no client ctx found, device_name: %s, client_name: %s.\n", + dev->dev_name, client->client_name); + +out: + up_read(&dev->client_ctx_rwsem); +} +EXPORT_SYMBOL(ubcore_set_client_ctx_data); + +static struct ubcore_client_ctx * +ubcore_lookup_client_context(struct ubcore_device *dev, + struct ubcore_client *client) +{ + struct ubcore_client_ctx *found_ctx = NULL; + struct ubcore_client_ctx *ctx, *tmp; + + if (dev == NULL || client == NULL) { + ubcore_log_err("dev is null"); + return NULL; + } + + down_read(&dev->client_ctx_rwsem); + list_for_each_entry_safe(ctx, tmp, &dev->client_ctx_list, list_node) { + if (ctx->client == client) { + found_ctx = ctx; + break; + } + } + up_read(&dev->client_ctx_rwsem); + return found_ctx; +} + +void *ubcore_get_client_ctx_data(struct ubcore_device *dev, + struct ubcore_client *client) +{ + struct ubcore_client_ctx *found_ctx = NULL; + + if (dev == NULL || client == NULL || client->client_name == NULL || + strnlen(dev->dev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME || + strnlen(client->client_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("dev or client is null"); + return NULL; + } + + found_ctx = ubcore_lookup_client_context(dev, client); + if (found_ctx == NULL) { + ubcore_log_warn( + "no client ctx found, dev_name: %s, client_name: %s.\n", + dev->dev_name, client->client_name); + return NULL; + } else { + return found_ctx->data; + } +} +EXPORT_SYMBOL(ubcore_get_client_ctx_data); + +static int create_client_ctx(struct ubcore_device *dev, + struct ubcore_client *client) +{ + struct ubcore_client_ctx *ctx; + + ctx = kmalloc(sizeof(struct ubcore_client_ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->data = NULL; + ctx->client = client; + + down_write(&dev->client_ctx_rwsem); + list_add(&ctx->list_node, &dev->client_ctx_list); + downgrade_write(&dev->client_ctx_rwsem); + if (client->add && client->add(dev) != 0) { + list_del(&ctx->list_node); + kfree(ctx); + up_read(&dev->client_ctx_rwsem); + return -EPERM; + } + up_read(&dev->client_ctx_rwsem); + return 0; +} + +static void destroy_client_ctx(struct ubcore_device *dev, + struct ubcore_client_ctx *ctx) +{ + if (dev == NULL || ctx == NULL) + return; + + down_write(&dev->client_ctx_rwsem); + list_del(&ctx->list_node); + 
kfree(ctx); + up_write(&dev->client_ctx_rwsem); +} + +int ubcore_register_client(struct ubcore_client *new_client) +{ + struct ubcore_device *dev; + + if (new_client == NULL || new_client->client_name == NULL || + new_client->add == NULL || new_client->remove == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -1; + } + + if (strnlen(new_client->client_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("Invalid parameter, client name.\n"); + return -1; + } + + down_write(&g_device_rwsem); + list_for_each_entry(dev, &g_device_list, list_node) { + if (create_client_ctx(dev, new_client) != 0) + ubcore_log_warn( + "ubcore device: %s add client:%s context failed.\n", + dev->dev_name, new_client->client_name); + } + down_write(&g_clients_rwsem); + list_add_tail(&new_client->list_node, &g_client_list); + up_write(&g_clients_rwsem); + + up_write(&g_device_rwsem); + + ubcore_log_info("ubcore client: %s register success.\n", + new_client->client_name); + return 0; +} +EXPORT_SYMBOL(ubcore_register_client); + +void ubcore_unregister_client(struct ubcore_client *rm_client) +{ + struct ubcore_client_ctx *found_ctx = NULL; + struct ubcore_device *dev; + + if (rm_client == NULL || rm_client->client_name == NULL || + rm_client->add == NULL || rm_client->remove == NULL) { + ubcore_log_err("Invalid parameter"); + return; + } + if (strnlen(rm_client->client_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("Invalid parameter, client name.\n"); + return; + } + + down_write(&g_device_rwsem); + + down_write(&g_clients_rwsem); + list_del(&rm_client->list_node); + up_write(&g_clients_rwsem); + + downgrade_write(&g_device_rwsem); + list_for_each_entry(dev, &g_device_list, list_node) { + found_ctx = ubcore_lookup_client_context(dev, rm_client); + if (found_ctx == NULL) { + ubcore_log_warn( + "no client ctx found, dev_name: %s, client_name: %s.\n", + dev->dev_name, rm_client->client_name); + continue; + } + if (rm_client->remove) + rm_client->remove(dev, found_ctx->data); + + destroy_client_ctx(dev, found_ctx); + ubcore_log_info( + "dev remove client, dev_name: %s, client_name: %s.\n", + dev->dev_name, rm_client->client_name); + } + + up_read(&g_device_rwsem); + ubcore_log_info("ubcore client: %s unregister success.\n", + rm_client->client_name); +} +EXPORT_SYMBOL(ubcore_unregister_client); + +struct ubcore_device *ubcore_find_device(union ubcore_eid *eid, + enum ubcore_transport_type type) +{ + struct ubcore_device *dev, *target = NULL; + uint32_t idx; + + down_read(&g_device_rwsem); + list_for_each_entry(dev, &g_device_list, list_node) { + if (IS_ERR_OR_NULL(dev->eid_table.eid_entries)) + continue; + for (idx = 0; idx < dev->attr.dev_cap.max_eid_cnt; idx++) { + if (memcmp(&dev->eid_table.eid_entries[idx].eid, eid, + sizeof(union ubcore_eid)) == 0 && + dev->transport_type == type) { + target = dev; + ubcore_get_device(target); + break; + } + } + if (target != NULL) + break; + } + up_read(&g_device_rwsem); + return target; +} + +struct ubcore_device *ubcore_find_device_with_name(const char *dev_name) +{ + struct ubcore_device *dev, *target = NULL; + + down_read(&g_device_rwsem); + list_for_each_entry(dev, &g_device_list, list_node) { + if (strcmp(dev->dev_name, dev_name) == 0) { + target = dev; + ubcore_get_device(target); + break; + } + } + up_read(&g_device_rwsem); + return target; +} + +bool ubcore_check_dev_is_exist(const char *dev_name) +{ + struct ubcore_device *dev = NULL; + + dev = ubcore_find_device_with_name(dev_name); + if (dev != NULL) + 
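A sketch of the client-side contract implied by ubcore_register_client() above, assuming struct ubcore_client exposes exactly the callbacks used in this file (a name plus add and remove); the client name and the per-device context handling are illustrative only.

static struct ubcore_client example_client;

static int example_client_add(struct ubcore_device *dev)
{
	void *priv = kzalloc(16, GFP_KERNEL);	/* per-device client context */

	if (priv == NULL)
		return -ENOMEM;
	ubcore_set_client_ctx_data(dev, &example_client, priv);
	return 0;
}

static void example_client_remove(struct ubcore_device *dev, void *client_ctx)
{
	kfree(client_ctx);	/* ctx->data handed back by ubcore */
}

static struct ubcore_client example_client = {
	.client_name = "example_client",
	.add = example_client_add,
	.remove = example_client_remove,
};

/* module init: ubcore_register_client(&example_client);
 * module exit: ubcore_unregister_client(&example_client);
 */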
ubcore_put_device(dev); + + return dev != NULL ? true : false; +} + +int ubcore_find_upi_with_dev_name(const char *dev_name, uint32_t *upi) +{ + struct ubcore_upi_entry *entry = NULL; + int ret = -1; + + mutex_lock(&g_upi_lock); + list_for_each_entry(entry, &g_upi_list, node) { + if (entry != NULL && + strcmp(entry->dev->dev_name, dev_name) == 0) { + *upi = entry->upi; + ret = 0; + break; + } + } + mutex_unlock(&g_upi_lock); + return ret; +} + +int ubcore_add_upi_list(struct ubcore_device *dev, uint32_t upi) +{ + struct ubcore_upi_entry *entry = NULL, *new_entry = NULL; + + mutex_lock(&g_upi_lock); + list_for_each_entry(entry, &g_upi_list, node) { + if (entry != NULL && entry->dev == dev) { + entry->upi = upi; + mutex_unlock(&g_upi_lock); + return 0; + } + } + mutex_unlock(&g_upi_lock); + + new_entry = kzalloc(sizeof(struct ubcore_upi_entry), GFP_KERNEL); + if (new_entry == NULL) + return -ENOMEM; + + new_entry->dev = dev; + new_entry->upi = upi; + + mutex_lock(&g_upi_lock); + list_add_tail(&new_entry->node, &g_upi_list); + mutex_unlock(&g_upi_lock); + ubcore_log_info("add dev_name: %s, upi: 0x%x to upi list\n", + dev->dev_name, upi); + return 0; +} + +void ubcore_destroy_upi_list(struct ubcore_device *dev) +{ + struct ubcore_upi_entry *entry = NULL, *next; + + mutex_lock(&g_upi_lock); + list_for_each_entry_safe(entry, next, &g_upi_list, node) { + if (entry != NULL && entry->dev == dev) { + list_del(&entry->node); + kfree(entry); + break; + } + } + mutex_unlock(&g_upi_lock); +} + +static bool ubcore_netdev_in_ubdev(struct ubcore_device *dev, + struct net_device *netdev) +{ + struct net_device *real_netdev = NULL; + + if (dev->transport_type == UBCORE_TRANSPORT_UB && is_vlan_dev(netdev)) + real_netdev = vlan_dev_real_dev(netdev); + else + real_netdev = netdev; + + return (dev->netdev == real_netdev); +} + +struct ubcore_device **ubcore_get_devices_from_netdev(struct net_device *netdev, + uint32_t *cnt) +{ + struct ubcore_device **devices; + struct ubcore_device *dev; + uint32_t i = 0; + + if (!netdev) { + ubcore_log_err("Invalid netdev.\n"); + *cnt = 0; + return NULL; + } + + down_read(&g_device_rwsem); + list_for_each_entry(dev, &g_device_list, list_node) { + /* Assume netdev is related to the first and only port */ + if (ubcore_netdev_in_ubdev(dev, netdev)) + i++; + } + up_read(&g_device_rwsem); + + if (i == 0) { + *cnt = 0; + return NULL; + } + + devices = kzalloc(i * sizeof(struct ubcore_device *), GFP_ATOMIC); + if (devices == NULL) { + *cnt = 0; + return NULL; + } + + *cnt = i; + i = 0; + down_read(&g_device_rwsem); + list_for_each_entry(dev, &g_device_list, list_node) { + if (ubcore_netdev_in_ubdev(dev, netdev)) { + ubcore_get_device(dev); + devices[i] = dev; + i++; + } + } + up_read(&g_device_rwsem); + + return devices; +} + +void ubcore_put_devices(struct ubcore_device **devices, uint32_t cnt) +{ + uint32_t i; + + if (devices == NULL) + return; + + for (i = 0; i < cnt; i++) + ubcore_put_device(devices[i]); + + kfree(devices); +} + +void ubcore_get_device(struct ubcore_device *dev) +{ + if (IS_ERR_OR_NULL(dev)) { + ubcore_log_err("Invalid parameter\n"); + return; + } + + atomic_inc(&dev->use_cnt); +} + +void ubcore_put_device(struct ubcore_device *dev) +{ + if (IS_ERR_OR_NULL(dev)) { + ubcore_log_err("Invalid parameter\n"); + return; + } + + if (atomic_dec_and_test(&dev->use_cnt)) + complete(&dev->comp); +} + +struct ubcore_device * +ubcore_find_mue_device_legacy(enum ubcore_transport_type type) +{ + if (g_ub_mue == NULL) { + ubcore_log_info("mue is not registered yet\n"); + return 
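A short sketch of the reference-counting contract of the two helpers above: ubcore_get_devices_from_netdev() takes a reference on every matching device, and ubcore_put_devices() both drops those references and frees the returned array. Purely illustrative.

static void example_walk_netdev_devices(struct net_device *netdev)
{
	struct ubcore_device **devs;
	uint32_t cnt = 0;
	uint32_t i;

	devs = ubcore_get_devices_from_netdev(netdev, &cnt);
	if (devs == NULL)
		return;

	for (i = 0; i < cnt; i++)
		pr_info("ubcore device %s is bound to netdev %s\n",
			devs[i]->dev_name, netdev->name);

	ubcore_put_devices(devs, cnt);	/* drop references and free the array */
}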
NULL; + } + + if (g_ub_mue->transport_type != type) { + ubcore_log_info("mue of tran type:%d, not registered yet\n", + (int)type); + return NULL; + } + + ubcore_get_device(g_ub_mue); + return g_ub_mue; +} + +struct ubcore_device *ubcore_find_mue_by_dev(struct ubcore_device *dev) +{ + if (dev == NULL) + return NULL; + + if (dev->attr.tp_maintainer) { + ubcore_get_device(dev); + return dev; + } + + return ubcore_find_mue_device_legacy(dev->transport_type); +} + +struct ubcore_device *ubcore_find_mue_device_by_name(char *dev_name) +{ + struct ubcore_device *dev; + + dev = ubcore_find_device_with_name(dev_name); + if (dev == NULL) { + ubcore_log_err("can not find dev by name:%s", dev_name); + return NULL; + } + + if (dev->attr.tp_maintainer) + return dev; + + ubcore_log_err("dev:%s is not mue", dev_name); + ubcore_put_device(dev); + return NULL; +} + +struct ubcore_device ** +ubcore_get_all_mue_device(enum ubcore_transport_type type, uint32_t *dev_cnt) +{ + struct ubcore_device **dev_list; + struct ubcore_device *dev; + uint32_t count = 0; + int i = 0; + + *dev_cnt = 0; + down_read(&g_device_rwsem); + list_for_each_entry(dev, &g_device_list, list_node) { + if (dev->attr.tp_maintainer && dev->transport_type == type) + ++count; + } + + if (count == 0) { + up_read(&g_device_rwsem); + return NULL; + } + + dev_list = kcalloc(count, sizeof(struct ubcore_device *), GFP_KERNEL); + if (dev_list == NULL) { + up_read(&g_device_rwsem); + return NULL; + } + + list_for_each_entry(dev, &g_device_list, list_node) { + if (dev->attr.tp_maintainer && dev->transport_type == type) { + dev_list[i++] = dev; + ubcore_get_device(dev); + } + } + *dev_cnt = count; + up_read(&g_device_rwsem); + + return dev_list; +} + +struct ubcore_device * +ubcore_find_mue_device(union ubcore_net_addr_union *netaddr, + enum ubcore_transport_type type) +{ + if (netaddr == NULL) + return NULL; + + return ubcore_lookup_mue_by_sip_addr(netaddr, type); +} + +static void ubcore_free_driver_obj(void *obj) +{ + // obj alloced by driver, should not free by ubcore + ubcore_log_err("obj was not free correctly!"); +} + +static struct ubcore_ht_param g_ht_params[] = { + [UBCORE_HT_JFS] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_jfs, hnode), + offsetof(struct ubcore_jfs, jfs_id) + + offsetof(struct ubcore_jetty_id, id), + sizeof(uint32_t), NULL, ubcore_free_driver_obj, + ubcore_jfs_get }, + + [UBCORE_HT_JFR] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_jfr, hnode), + offsetof(struct ubcore_jfr, jfr_id) + + offsetof(struct ubcore_jetty_id, id), + sizeof(uint32_t), NULL, ubcore_free_driver_obj, + ubcore_jfr_get }, + [UBCORE_HT_JFC] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_jfc, hnode), + offsetof(struct ubcore_jfc, id), sizeof(uint32_t), + NULL, ubcore_free_driver_obj, NULL }, + + [UBCORE_HT_JETTY] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_jetty, hnode), + offsetof(struct ubcore_jetty, jetty_id) + + offsetof(struct ubcore_jetty_id, id), + sizeof(uint32_t), NULL, ubcore_free_driver_obj, + ubcore_jetty_get }, + + [UBCORE_HT_TP] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_tp, hnode), + offsetof(struct ubcore_tp, tpn), sizeof(uint32_t), + NULL, ubcore_free_driver_obj, ubcore_tp_get }, + + [UBCORE_HT_TPG] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_tpg, hnode), + offsetof(struct ubcore_tpg, tpgn), sizeof(uint32_t), + NULL, ubcore_free_driver_obj, ubcore_tpg_get }, + + /* key: seid + deid */ + [UBCORE_HT_RM_VTP] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_vtp, hnode), + offsetof(struct 
ubcore_vtp, cfg) + + offsetof(struct ubcore_vtp_cfg, + local_eid), + sizeof(union ubcore_eid) * 2, NULL, + ubcore_free_driver_obj, ubcore_vtp_get }, + + /* key: deid + djetty */ + [UBCORE_HT_RC_VTP] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_vtp, hnode), + offsetof(struct ubcore_vtp, cfg) + + offsetof(struct ubcore_vtp_cfg, + peer_eid), + sizeof(union ubcore_eid) + sizeof(uint32_t), + NULL, ubcore_free_driver_obj, ubcore_vtp_get }, + + /* key: seid + deid */ + [UBCORE_HT_UM_VTP] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_vtp, hnode), + offsetof(struct ubcore_vtp, cfg) + + offsetof(struct ubcore_vtp_cfg, + local_eid), + sizeof(union ubcore_eid) * 2, NULL, + ubcore_free_driver_obj, ubcore_vtp_get }, + + /* key: src_eid + des_eid */ + [UBCORE_HT_RM_VTPN] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_vtpn, hnode), + offsetof(struct ubcore_vtpn, local_eid), + 2 * sizeof(union ubcore_eid), NULL, + ubcore_free_driver_obj, ubcore_vtpn_get }, + + /* key: src_eid + des_eid + src_jetty + des_jetty */ + [UBCORE_HT_RC_VTPN] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_vtpn, hnode), + offsetof(struct ubcore_vtpn, local_eid), + 2 * sizeof(union ubcore_eid) + + 2 * sizeof(uint32_t), + NULL, ubcore_free_driver_obj, ubcore_vtpn_get }, + + /* key: src_eid + des_eid */ + [UBCORE_HT_UM_VTPN] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_vtpn, hnode), + offsetof(struct ubcore_vtpn, local_eid), + 2 * sizeof(union ubcore_eid), NULL, + ubcore_free_driver_obj, ubcore_vtpn_get }, + + /* key: currently tp_handle */ + [UBCORE_HT_CP_VTPN] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_vtpn, hnode), + offsetof(struct ubcore_vtpn, tp_handle), + sizeof(uint64_t), NULL, ubcore_free_driver_obj, + ubcore_vtpn_get }, + + /* key: vtpn */ + [UBCORE_HT_VTPN] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_vtpn, vtpn_hnode), + offsetof(struct ubcore_vtpn, vtpn), + sizeof(uint32_t), NULL, ubcore_free_driver_obj, + ubcore_vtpn_get }, + + /* key: utp idx */ + [UBCORE_HT_UTP] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_utp, hnode), + offsetof(struct ubcore_utp, utpn), sizeof(uint32_t), + NULL, ubcore_free_driver_obj, ubcore_utp_get }, + + /* key: ctp idx */ + [UBCORE_HT_CTP] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_ctp, hnode), + offsetof(struct ubcore_ctp, ctpn), sizeof(uint32_t), + NULL, ubcore_free_driver_obj, ubcore_ctp_get }, + + [UBCORE_HT_EX_TP] = { UBCORE_HASH_TABLE_SIZE, + offsetof(struct ubcore_ex_tp_info, hnode), + offsetof(struct ubcore_ex_tp_info, tp_handle), + sizeof(uint64_t), NULL, ubcore_free_driver_obj, + NULL }, +}; + +static inline void ubcore_set_vtp_hash_table_size(uint32_t vtp_size) +{ + if (vtp_size == 0 || vtp_size > UBCORE_HASH_TABLE_SIZE) + return; + g_ht_params[UBCORE_HT_RM_VTP].size = vtp_size; + g_ht_params[UBCORE_HT_RC_VTP].size = vtp_size; + g_ht_params[UBCORE_HT_UM_VTP].size = vtp_size; +} + +static inline void ubcore_set_vtpn_hash_table_size(uint32_t vtpn_size) +{ + if (vtpn_size == 0 || vtpn_size > UBCORE_HASH_TABLE_SIZE) + return; + g_ht_params[UBCORE_HT_RM_VTPN].size = vtpn_size; + g_ht_params[UBCORE_HT_RC_VTPN].size = vtpn_size; + g_ht_params[UBCORE_HT_UM_VTPN].size = vtpn_size; + g_ht_params[UBCORE_HT_CP_VTPN].size = vtpn_size; + g_ht_params[UBCORE_HT_VTPN].size = vtpn_size; +} + +static void ubcore_update_hash_tables_size(const struct ubcore_device_cap *cap) +{ + if (cap->max_jfs != 0 && cap->max_jfs < g_ht_params[UBCORE_HT_JFS].size) + g_ht_params[UBCORE_HT_JFS].size = cap->max_jfs; + if (cap->max_jfr != 0 && 
cap->max_jfr < g_ht_params[UBCORE_HT_JFR].size) + g_ht_params[UBCORE_HT_JFR].size = cap->max_jfr; + if (cap->max_jfc != 0 && cap->max_jfc < g_ht_params[UBCORE_HT_JFC].size) + g_ht_params[UBCORE_HT_JFC].size = cap->max_jfc; + if (cap->max_jetty != 0 && + cap->max_jetty < g_ht_params[UBCORE_HT_JETTY].size) + g_ht_params[UBCORE_HT_JETTY].size = cap->max_jetty; + if (cap->max_tp_cnt != 0 && + cap->max_tp_cnt < g_ht_params[UBCORE_HT_TP].size) + g_ht_params[UBCORE_HT_TP].size = cap->max_tp_cnt; + if (cap->max_tpg_cnt != 0 && + cap->max_tpg_cnt < g_ht_params[UBCORE_HT_TPG].size) + g_ht_params[UBCORE_HT_TPG].size = cap->max_tpg_cnt; + if (cap->max_vtp_cnt_per_ue < UBCORE_HASH_TABLE_SIZE && + cap->max_ue_cnt < UBCORE_HASH_TABLE_SIZE) + ubcore_set_vtp_hash_table_size( + (cap->max_vtp_cnt_per_ue * cap->max_ue_cnt)); + ubcore_set_vtpn_hash_table_size(cap->max_vtp_cnt_per_ue); + + if (cap->max_utp_cnt != 0 && + cap->max_utp_cnt < g_ht_params[UBCORE_HT_UTP].size) + g_ht_params[UBCORE_HT_UTP].size = cap->max_utp_cnt; + /* ctp size use max_tp_cnt */ + if (cap->max_tp_cnt != 0 && + cap->max_tp_cnt < g_ht_params[UBCORE_HT_CTP].size) + g_ht_params[UBCORE_HT_CTP].size = cap->max_tp_cnt; +} + +static int ubcore_alloc_hash_tables(struct ubcore_device *dev) +{ + uint32_t i, j; + int ret; + + ubcore_update_hash_tables_size(&dev->attr.dev_cap); + for (i = 0; i < ARRAY_SIZE(g_ht_params); i++) { + ret = ubcore_hash_table_alloc(&dev->ht[i], &g_ht_params[i]); + if (ret != 0) { + ubcore_log_err("alloc hash tables failed.\n"); + goto free_tables; + } + } + + return 0; + +free_tables: + for (j = 0; j < i; j++) + ubcore_hash_table_free(&dev->ht[j]); + return -1; +} + +static void ubcore_destroy_vtp_in_unreg_dev(void *arg) +{ + struct ubcore_vtp *vtp = (struct ubcore_vtp *)arg; + + if (vtp->cfg.vtpn != UINT_MAX && vtp->ub_dev->ops->destroy_vtp != NULL) + (void)vtp->ub_dev->ops->destroy_vtp(vtp); + else + kfree(vtp); +} + +static void ubcore_destroy_tp_in_unreg_dev(void *arg) +{ + struct ubcore_tp *tp = (struct ubcore_tp *)arg; + + if (tp->ub_dev->ops->destroy_tp != NULL) + (void)tp->ub_dev->ops->destroy_tp(tp); +} + +static void ubcore_destroy_utp_in_unreg_dev(void *arg) +{ + struct ubcore_utp *utp = (struct ubcore_utp *)arg; + + if (utp->ub_dev->ops->destroy_utp != NULL) + (void)utp->ub_dev->ops->destroy_utp(utp); +} + +static void ubcore_destroy_tpg_in_unreg_dev(void *arg) +{ + struct ubcore_tpg *tpg = (struct ubcore_tpg *)arg; + + if (tpg->ub_dev->ops->destroy_tpg != NULL) + (void)tpg->ub_dev->ops->destroy_tpg(tpg); +} + +static void ubcore_free_driver_res(struct ubcore_device *dev) +{ + if (!dev->attr.tp_maintainer) + return; + + ubcore_hash_table_free_with_cb(&dev->ht[UBCORE_HT_RM_VTP], + ubcore_destroy_vtp_in_unreg_dev); + ubcore_hash_table_free_with_cb(&dev->ht[UBCORE_HT_RC_VTP], + ubcore_destroy_vtp_in_unreg_dev); + ubcore_hash_table_free_with_cb(&dev->ht[UBCORE_HT_UM_VTP], + ubcore_destroy_vtp_in_unreg_dev); + ubcore_hash_table_free_with_cb(&dev->ht[UBCORE_HT_TP], + ubcore_destroy_tp_in_unreg_dev); + ubcore_hash_table_free_with_cb(&dev->ht[UBCORE_HT_UTP], + ubcore_destroy_utp_in_unreg_dev); + + ubcore_hash_table_free_with_cb(&dev->ht[UBCORE_HT_TPG], + ubcore_destroy_tpg_in_unreg_dev); +} + +static void ubcore_free_hash_tables(struct ubcore_device *dev) +{ + uint32_t i; + + for (i = 0; i < ARRAY_SIZE(g_ht_params); i++) + ubcore_hash_table_free(&dev->ht[i]); +} + +static void ubcore_device_release(struct device *device) +{ +} + +static int ubcore_create_eidtable(struct ubcore_device *dev) +{ + struct 
ubcore_eid_entry *entry_list; + + if (dev->attr.dev_cap.max_eid_cnt > UBCORE_MAX_EID_CNT || + dev->attr.dev_cap.max_eid_cnt == 0) { + ubcore_log_err("dev max_eid_cnt invalid:%u\n", + dev->attr.dev_cap.max_eid_cnt); + return -EINVAL; + } + + entry_list = kcalloc(1, + dev->attr.dev_cap.max_eid_cnt * + sizeof(struct ubcore_eid_entry), + GFP_KERNEL); + if (entry_list == NULL) + return -ENOMEM; + + dev->eid_table.eid_entries = entry_list; + spin_lock_init(&dev->eid_table.lock); + dev->eid_table.eid_cnt = dev->attr.dev_cap.max_eid_cnt; + dev->dynamic_eid = 1; + return 0; +} + +static void ubcore_destroy_eidtable(struct ubcore_device *dev) +{ + struct ubcore_eid_entry *e = NULL; + + spin_lock(&dev->eid_table.lock); + e = dev->eid_table.eid_entries; + dev->eid_table.eid_entries = NULL; + spin_unlock(&dev->eid_table.lock); + if (e != NULL) + kfree(e); +} + +static int ubcore_send_mue_info_to_all_uvs(const struct ubcore_device *dev, + struct ubcore_nlmsg *req_msg, + const char *mue_action) +{ + struct ubcore_uvs_instance **uvs_list = NULL; + int success_count = 0; + int count = 0; + int ret, i; + + uvs_list = ubcore_uvs_list_get_all_alive(&count); + for (i = 0; i < count; i++) { + ret = ubcore_nl_send_nowait_without_cb(req_msg, uvs_list[i]); + if (ret != 0) + ubcore_log_err( + "Failed to send %s mue dev %s info request to uvs %s(id %u), ret:%d\n", + mue_action, dev->dev_name, uvs_list[i]->name, + uvs_list[i]->id, ret); + else { + success_count++; + ubcore_log_info( + "Success to send %s mue dev %s info request to uvs %s(id %u)\n", + mue_action, dev->dev_name, uvs_list[i]->name, + uvs_list[i]->id); + } + } + + ubcore_uvs_list_put(uvs_list, count); + + if (count != 0 && success_count == count) + return 0; + else + return -1; +} + +static int ubcore_send_remove_mue_dev_info(struct ubcore_device *dev) +{ + struct ubcore_update_mue_dev_info_req *data; + struct ubcore_nlmsg *req_msg; + int ret; + + req_msg = kcalloc(1, + sizeof(struct ubcore_nlmsg) + + sizeof(struct ubcore_update_mue_dev_info_req), + GFP_KERNEL); + if (req_msg == NULL) + return -ENOMEM; + + req_msg->msg_type = UBCORE_CMD_UPDATE_MUE_DEV_INFO_REQ; + req_msg->transport_type = dev->transport_type; + req_msg->payload_len = sizeof(struct ubcore_update_mue_dev_info_req); + + /* fill msg payload */ + data = (struct ubcore_update_mue_dev_info_req *)req_msg->payload; + data->dev_fea = dev->attr.dev_cap.feature; + data->cc_entry_cnt = 0; + data->opcode = UBCORE_UPDATE_MUE_DEL; + (void)strscpy(data->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME - 1); + + ret = ubcore_send_mue_info_to_all_uvs(dev, req_msg, "delete"); + kfree(req_msg); + return ret; +} + +static struct ubcore_cc_entry *ubcore_get_cc_entry(struct ubcore_device *dev, + uint32_t *cc_entry_cnt) +{ + struct ubcore_cc_entry *cc_entry = NULL; + *cc_entry_cnt = 0; + + if (dev->ops == NULL || dev->ops->query_cc == NULL) { + ubcore_log_err("Invalid parameter!\n"); + return NULL; + } + + cc_entry = dev->ops->query_cc(dev, cc_entry_cnt); + if (cc_entry == NULL) { + ubcore_log_err("Failed to query cc entry\n"); + return NULL; + } + + if (*cc_entry_cnt > UBCORE_CC_IDX_TABLE_SIZE || *cc_entry_cnt == 0) { + kfree(cc_entry); + ubcore_log_err("cc_entry_cnt invalid, %u.\n", *cc_entry_cnt); + return NULL; + } + + return cc_entry; +} + +struct ubcore_nlmsg *ubcore_new_mue_dev_msg(struct ubcore_device *dev) +{ + struct ubcore_update_mue_dev_info_req *data; + struct ubcore_cc_entry *cc_entry; + struct ubcore_cc_entry *array; + struct ubcore_nlmsg *req_msg; + uint32_t cc_entry_cnt; + uint32_t cc_len; + + // If 
not support cc, cc_entry may be NULL, cc_entry_cnt is 0 + cc_entry = ubcore_get_cc_entry(dev, &cc_entry_cnt); + + cc_len = (uint32_t)sizeof(struct ubcore_update_mue_dev_info_req) + + cc_entry_cnt * (uint32_t)sizeof(struct ubcore_cc_entry); + req_msg = kcalloc(1, sizeof(struct ubcore_nlmsg) + cc_len, GFP_KERNEL); + if (req_msg == NULL) + goto out; + + /* fill msg head */ + req_msg->msg_type = UBCORE_CMD_UPDATE_MUE_DEV_INFO_REQ; + req_msg->transport_type = dev->transport_type; + req_msg->payload_len = cc_len; + + /* fill msg payload */ + data = (struct ubcore_update_mue_dev_info_req *)req_msg->payload; + data->dev_fea = dev->attr.dev_cap.feature; + data->cc_entry_cnt = cc_entry_cnt; + data->opcode = UBCORE_UPDATE_MUE_ADD; + (void)strscpy(data->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME - 1); + + if (dev->netdev != NULL && + strnlen(dev->netdev->name, UBCORE_MAX_DEV_NAME) < + UBCORE_MAX_DEV_NAME) + (void)strscpy(data->netdev_name, dev->netdev->name, + UBCORE_MAX_DEV_NAME - 1); + + if (cc_entry != NULL) { + array = (struct ubcore_cc_entry *)data->data; + (void)memcpy(array, cc_entry, + sizeof(struct ubcore_cc_entry) * cc_entry_cnt); + } + +out: + if (cc_entry != NULL) + kfree(cc_entry); + return req_msg; +} + +static int ubcore_query_send_mue_dev_info(struct ubcore_device *dev) +{ + struct ubcore_nlmsg *req_msg; + int ret; + + req_msg = ubcore_new_mue_dev_msg(dev); + if (req_msg == NULL) + return -1; + + ret = ubcore_send_mue_info_to_all_uvs(dev, req_msg, "add"); + kfree(req_msg); + return ret; +} + +static int ubcore_create_main_device(struct ubcore_device *dev) +{ + struct ubcore_logic_device *ldev = &dev->ldev; + struct net *net = &init_net; + int ret; + + /* create /sys/class/ubcore/dev_name> */ + write_pnet(&ldev->net, net); + ldev->ub_dev = dev; + ldev->dev = &dev->dev; + + device_initialize(&dev->dev); + dev->dev.class = &g_ubcore_class; + dev->dev.release = ubcore_device_release; + /* dev_set_name will alloc mem use put_device to free */ + (void)dev_set_name(&dev->dev, "%s", dev->dev_name); + dev_set_drvdata(&dev->dev, ldev); + + ret = device_add(&dev->dev); + if (ret) { + put_device(&dev->dev); // to free res used by kobj + return ret; + } + + if (ubcore_fill_logic_device_attr(ldev, dev) != 0) { + device_del(&dev->dev); + put_device(&dev->dev); + ldev->dev = NULL; + ubcore_log_err("failed to fill attributes, device:%s.\n", + dev->dev_name); + return -EPERM; + } + + return 0; +} + +static void ubcore_destroy_main_device(struct ubcore_device *dev) +{ + struct ubcore_logic_device *ldev = &dev->ldev; + + ubcore_unfill_logic_device_attr(ldev, dev); + device_del(ldev->dev); + put_device(ldev->dev); + ldev->dev = NULL; +} + +static void ubcore_init_net_addr(struct ubcore_device *dev) +{ + if (dev->transport_type == UBCORE_TRANSPORT_UB && + dev->attr.tp_maintainer && ubcore_get_netlink_valid()) { + if (ubcore_query_send_mue_dev_info(dev) != 0) + ubcore_log_warn( + "failed to query cc info in ubcore with dev name %s", + dev->dev_name); + } + + ubcore_update_netdev_addr(dev, dev->netdev, UBCORE_ADD_NET_ADDR, false); + ubcore_update_all_vlan_netaddr(dev, UBCORE_ADD_NET_ADDR); +} + +static void uninit_ubcore_mue(struct ubcore_device *dev) +{ + if (!dev->attr.tp_maintainer) + return; + + if (g_ub_mue == dev) + g_ub_mue = NULL; + + if (ubcore_is_ub_device(dev)) + ubcore_sip_table_uninit(&dev->sip_table); +} + +static int init_ubcore_mue(struct ubcore_device *dev) +{ + if (!dev->attr.tp_maintainer) + return 0; + + if (ubcore_is_ub_device(dev)) { + if (ubcore_sip_table_init(&dev->sip_table, + 
dev->attr.dev_cap.max_netaddr_cnt) != + 0) { + ubcore_log_err("Failed init sip table.\n"); + return -1; + } + } + + /* set mue device */ + if (dev->transport_type == UBCORE_TRANSPORT_UB && g_ub_mue == NULL) + g_ub_mue = dev; + + return 0; +} + +static int init_ubcore_device(struct ubcore_device *dev) +{ + if (dev->ops->query_device_attr != NULL && + dev->ops->query_device_attr(dev, &dev->attr) != 0) { + ubcore_log_err("Failed to query device attributes"); + return -1; + } + + if (init_ubcore_mue(dev) != 0) + return -1; + + INIT_LIST_HEAD(&dev->list_node); + init_rwsem(&dev->client_ctx_rwsem); + INIT_LIST_HEAD(&dev->client_ctx_list); + INIT_LIST_HEAD(&dev->port_list); + init_rwsem(&dev->event_handler_rwsem); + INIT_LIST_HEAD(&dev->event_handler_list); + + if (!dev->attr.virtualization) + (void)ubcore_add_upi_list(dev, UCBORE_DEFAULT_UPI); + + init_completion(&dev->comp); + atomic_set(&dev->use_cnt, 1); + + if (ubcore_create_eidtable(dev) != 0) { + ubcore_log_err("create eidtable failed.\n"); + goto destroy_upi; + } + + if (ubcore_alloc_hash_tables(dev) != 0) { + ubcore_log_err("alloc hash tables failed.\n"); + goto destroy_eidtable; + } + + mutex_init(&dev->ldev_mutex); + INIT_LIST_HEAD(&dev->ldev_list); + return 0; + +destroy_eidtable: + ubcore_destroy_eidtable(dev); +destroy_upi: + if (!dev->attr.virtualization) + ubcore_destroy_upi_list(dev); + + uninit_ubcore_mue(dev); + return -1; +} + +static void ubcore_remove_usv_sip_entry_without_lock(struct ubcore_device *dev, + uint32_t idx) +{ + if (ubcore_del_net_addr(dev, idx) != 0) + ubcore_log_err("Failed to delete net addr for dev %s idx %u", + dev->dev_name, idx); + (void)ubcore_del_sip_entry_without_lock(&dev->sip_table, idx); + (void)ubcore_sip_idx_free_without_lock(&dev->sip_table, idx); +} + +static void ubcore_remove_uvs_sip_info(struct ubcore_device *dev) +{ + struct ubcore_sip_info *sip_info; + uint32_t i; + + mutex_lock(&dev->sip_table.lock); + for (i = 0; i < dev->sip_table.max_sip_cnt; i++) { + sip_info = &dev->sip_table.entry[i].sip_info; + if (!sip_info->is_active) + continue; + if (ubcore_get_netlink_valid() == true) + (void)ubcore_notify_uvs_del_sip(dev, sip_info, i); + + // remove sip in case that uvs does not respond + ubcore_remove_usv_sip_entry_without_lock(dev, i); + } + mutex_unlock(&dev->sip_table.lock); +} + +static void ubcore_uninit_net_addr(struct ubcore_device *dev) +{ + ubcore_free_netdev_port_list(dev); + + ubcore_update_all_vlan_netaddr(dev, UBCORE_DEL_NET_ADDR); + ubcore_update_netdev_addr(dev, dev->netdev, UBCORE_DEL_NET_ADDR, false); + + if (dev->transport_type == UBCORE_TRANSPORT_UB && + dev->attr.tp_maintainer) { + ubcore_remove_uvs_sip_info(dev); + if (ubcore_get_netlink_valid() && + ubcore_send_remove_mue_dev_info(dev) != 0) + ubcore_log_warn("failed to remove mue dev info %s", + dev->dev_name); + } +} + +static void uninit_ubcore_device(struct ubcore_device *dev) +{ + mutex_destroy(&dev->ldev_mutex); + ubcore_free_driver_res(dev); + ubcore_free_hash_tables(dev); + ubcore_destroy_eidtable(dev); + + if (!dev->attr.virtualization) + ubcore_destroy_upi_list(dev); + + uninit_ubcore_mue(dev); +} + +static int ubcore_initiate_negotiation(struct ubcore_device *dev) +{ + struct ubcore_msg_nego_ver_req *data; + struct ubcore_req *req_msg; + uint32_t data_length; + + if (dev->transport_type != UBCORE_TRANSPORT_UB) + return 0; + + if (ubcore_negotiated()) + return 0; + + if (!dev->attr.virtualization) { + ubcore_set_version(UBCORE_VERSION); + ubcore_set_cap(UBCORE_CAP); + return 0; + } + + data_length = 
sizeof(struct ubcore_msg_nego_ver_req) + + UBCORE_SUPPORT_VERION_NUM * sizeof(uint32_t); + req_msg = + kcalloc(1, sizeof(struct ubcore_req) + data_length, GFP_KERNEL); + if (req_msg == NULL) + return -ENOMEM; + + req_msg->opcode = UBCORE_MSG_NEGO_VER; + req_msg->len = data_length; + + data = (struct ubcore_msg_nego_ver_req *)req_msg->data; + data->cap = UBCORE_CAP; + data->version_num = UBCORE_SUPPORT_VERION_NUM; + (void)memcpy(data->versions, ubcore_get_support_versions(), + UBCORE_SUPPORT_VERION_NUM * sizeof(uint32_t)); + + kfree(req_msg); + return 0; +} + +int ubcore_config_rsvd_jetty(struct ubcore_device *dev, uint32_t min_jetty_id, + uint32_t max_jetty_id) +{ + struct ubcore_device_cfg cfg = { 0 }; + int ret = 0; + + if (dev == NULL || dev->ops == NULL || + dev->ops->config_device == NULL || + dev->ops->query_device_attr == NULL || + dev->transport_type != UBCORE_TRANSPORT_UB) { + return -EINVAL; + } + + cfg.ue_idx = dev->attr.ue_idx; + cfg.mask.bs.reserved_jetty_id_min = 1; + cfg.mask.bs.reserved_jetty_id_max = 1; + cfg.reserved_jetty_id_min = min_jetty_id; + cfg.reserved_jetty_id_max = max_jetty_id; + + ret = dev->ops->config_device(dev, &cfg); + if (ret) { + ubcore_log_info("dev:%s, not support reserved jetty\n", + dev->dev_name); + dev->attr.reserved_jetty_id_max = U32_MAX; + dev->attr.reserved_jetty_id_min = U32_MAX; + } else { + dev->ops->query_device_attr(dev, &dev->attr); + } + + return ret; +} + +static int ubcore_config_device_default(struct ubcore_device *dev) +{ + struct ubcore_device_cfg cfg = { 0 }; + + if (dev == NULL || dev->ops == NULL || + dev->ops->config_device == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + cfg.ue_idx = dev->attr.ue_idx; + + cfg.mask.bs.rc_cnt = 1; + cfg.mask.bs.rc_depth = 1; + cfg.rc_cfg.rc_cnt = dev->attr.dev_cap.max_rc; + cfg.rc_cfg.depth = dev->attr.dev_cap.max_rc_depth; + + (void)ubcore_config_rsvd_jetty(dev, UBCORE_RESERVED_JETTY_ID_MIN, + UBCORE_RESERVED_JETTY_ID_MAX); + /* slice and mask.slice are set to 0 by default */ + + /* If suspend_period and cnt cannot be read, do not need to configure it */ + return dev->ops->config_device(dev, &cfg); +} + +static int ubcore_config_device_in_register(struct ubcore_device *dev) +{ + struct ubcore_msg_config_device_req *data; + struct ubcore_req *req_msg; + + if (dev->transport_type != UBCORE_TRANSPORT_UB) + return 0; + + if (ubcore_get_netlink_valid() == false && !dev->attr.virtualization) { + ubcore_log_info( + "UVS is not connected, and use default config. dev: %s.\n", + dev->dev_name); + return ubcore_config_device_default(dev); + } + + req_msg = kcalloc(1, + sizeof(struct ubcore_req) + + sizeof(struct ubcore_msg_config_device_req), + GFP_KERNEL); + if (req_msg == NULL) + return -ENOMEM; + + /* Should not send UBCORE_MSG_CONFIG_DEVICE after register dev + * It will clear fe resource in uvs + */ + req_msg->opcode = UBCORE_MSG_CONFIG_DEVICE; + req_msg->len = (uint32_t)sizeof(struct ubcore_msg_config_device_req); + + data = (struct ubcore_msg_config_device_req *)req_msg->data; + (void)memcpy(data->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME); + data->max_rc_cnt = dev->attr.dev_cap.max_rc; + data->max_rc_depth = dev->attr.dev_cap.max_rc_depth; + data->min_slice = dev->attr.dev_cap.min_slice; + data->max_slice = dev->attr.dev_cap.max_slice; + data->virtualization = dev->attr.virtualization; + + /* New MUE devices need to be query suspend info. 
*/ + data->is_mue_dev = dev->attr.tp_maintainer; + + kfree(req_msg); + return 0; +} + +static void ubcore_clients_add(struct ubcore_device *dev) +{ + struct ubcore_client *client = NULL; + + down_read(&g_clients_rwsem); + list_for_each_entry(client, &g_client_list, list_node) { + if (create_client_ctx(dev, client) != 0) + ubcore_log_warn( + "ubcore device: %s add client:%s context failed.\n", + dev->dev_name, client->client_name); + } + up_read(&g_clients_rwsem); +} + +static void ubcore_clients_remove(struct ubcore_device *dev) +{ + struct ubcore_client_ctx *ctx, *tmp; + + down_read(&dev->client_ctx_rwsem); + list_for_each_entry_safe(ctx, tmp, &dev->client_ctx_list, list_node) { + if (ctx->client && ctx->client->remove) + ctx->client->remove(dev, ctx->data); + } + up_read(&dev->client_ctx_rwsem); + + down_write(&dev->client_ctx_rwsem); + list_for_each_entry_safe(ctx, tmp, &dev->client_ctx_list, list_node) { + list_del(&ctx->list_node); + kfree(ctx); + } + up_write(&dev->client_ctx_rwsem); +} + +static int ubcore_create_logic_device(struct ubcore_logic_device *ldev, + struct ubcore_device *dev, + struct net *net) +{ + /* create /sys/class/ubcore/dev_name> */ + write_pnet(&ldev->net, net); + ldev->ub_dev = dev; + + ldev->dev = device_create(&g_ubcore_class, dev->dev.parent, MKDEV(0, 0), + ldev, "%s", dev->dev_name); + if (IS_ERR(ldev->dev)) { + ubcore_log_err("device create failed, device:%s.\n", + dev->dev_name); + return -ENOMEM; + } + + if (ubcore_fill_logic_device_attr(ldev, dev) != 0) { + device_unregister(ldev->dev); + ldev->dev = NULL; + ubcore_log_err("failed to fill attributes, device:%s.\n", + dev->dev_name); + return -EPERM; + } + + return 0; +} + +static void ubcore_destroy_logic_device(struct ubcore_logic_device *ldev, + struct ubcore_device *dev) +{ + ubcore_unfill_logic_device_attr(ldev, dev); + device_unregister(ldev->dev); + ldev->dev = NULL; +} + +static void ubcore_remove_one_logic_device(struct ubcore_device *dev, + struct net *net) +{ + struct ubcore_logic_device *ldev, *tmp; + + mutex_lock(&dev->ldev_mutex); + list_for_each_entry_safe(ldev, tmp, &dev->ldev_list, node) { + if (net_eq(read_pnet(&ldev->net), net)) { + ubcore_destroy_logic_device(ldev, dev); + list_del(&ldev->node); + kfree(ldev); + break; + } + } + mutex_unlock(&dev->ldev_mutex); +} + +static void ubcore_remove_logic_devices(struct ubcore_device *dev) +{ + struct ubcore_logic_device *ldev, *tmp; + + if (dev->transport_type != UBCORE_TRANSPORT_UB) + return; + + mutex_lock(&dev->ldev_mutex); + list_for_each_entry_safe(ldev, tmp, &dev->ldev_list, node) { + ubcore_destroy_logic_device(ldev, dev); + list_del(&ldev->node); + kfree(ldev); + } + mutex_unlock(&dev->ldev_mutex); +} + +static int ubcore_add_one_logic_device(struct ubcore_device *dev, + struct net *net) +{ + struct ubcore_logic_device *ldev; + int ret; + + mutex_lock(&dev->ldev_mutex); + list_for_each_entry(ldev, &dev->ldev_list, node) { + if (net_eq(read_pnet(&ldev->net), net)) { + mutex_unlock(&dev->ldev_mutex); + return 0; + } + } + + ldev = kzalloc(sizeof(struct ubcore_logic_device), GFP_KERNEL); + if (ldev == NULL) { + mutex_unlock(&dev->ldev_mutex); + return -ENOMEM; + } + + ret = ubcore_create_logic_device(ldev, dev, net); + if (ret) { + kfree(ldev); + mutex_unlock(&dev->ldev_mutex); + ubcore_log_err("add device failed %s in net %u", dev->dev_name, + net->ns.inum); + return ret; + } + + list_add_tail(&ldev->node, &dev->ldev_list); + mutex_unlock(&dev->ldev_mutex); + ubcore_log_info_rl("add device %s in net %u", dev->dev_name, + net->ns.inum); 
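To make the mask-driven configuration used by ubcore_config_rsvd_jetty() and ubcore_config_device_default() above more concrete, here is a hedged sketch: only the fields whose mask bits are set are expected to be applied by the driver, and the jetty-id range chosen below is arbitrary.

/* Illustrative only: reserve jetty ids 0..1023 on a device. */
static int example_reserve_jetty_range(struct ubcore_device *dev)
{
	struct ubcore_device_cfg cfg = { 0 };

	cfg.ue_idx = dev->attr.ue_idx;
	cfg.mask.bs.reserved_jetty_id_min = 1;
	cfg.mask.bs.reserved_jetty_id_max = 1;
	cfg.reserved_jetty_id_min = 0;
	cfg.reserved_jetty_id_max = 1023;

	return ubcore_config_device(dev, &cfg);
}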
+ return 0; +} + +static int ubcore_copy_logic_devices(struct ubcore_device *dev) +{ + struct ubcore_net *unet; + int ret = 0; + + if (dev->transport_type != UBCORE_TRANSPORT_UB) + return 0; + + down_read(&g_ubcore_net_rwsem); + list_for_each_entry(unet, &g_ubcore_net_list, node) { + if (net_eq(read_pnet(&unet->net), read_pnet(&dev->ldev.net))) + continue; + ret = ubcore_add_one_logic_device(dev, read_pnet(&unet->net)); + if (ret != 0) + break; + } + up_read(&g_ubcore_net_rwsem); + + if (ret) + ubcore_remove_logic_devices(dev); + + return ret; +} + +int ubcore_cdev_register(void) +{ + int ret; + + // If sysfs is created, return Success + // Need to add mutex + if (!IS_ERR_OR_NULL(g_ubcore_ctx.ubcore_dev)) + return 0; + + ret = alloc_chrdev_region(&g_ubcore_ctx.ubcore_devno, 0, 1, + UBCORE_DEVICE_NAME); + if (ret != 0) { + ubcore_log_err("alloc chrdev region failed, ret:%d.\n", ret); + return ret; + } + + cdev_init(&g_ubcore_ctx.ubcore_cdev, &g_ubcore_global_ops); + g_ubcore_ctx.ubcore_cdev.owner = THIS_MODULE; + + ret = cdev_add(&g_ubcore_ctx.ubcore_cdev, g_ubcore_ctx.ubcore_devno, 1); + if (ret != 0) { + ubcore_log_err("chrdev add failed, ret:%d.\n", ret); + goto unreg_cdev_region; + } + + /* /dev/ubcore */ + g_ubcore_ctx.ubcore_dev = device_create(&g_ubcore_class, NULL, + g_ubcore_ctx.ubcore_devno, NULL, + UBCORE_DEVICE_NAME); + if (IS_ERR(g_ubcore_ctx.ubcore_dev)) { + ret = (int)PTR_ERR(g_ubcore_ctx.ubcore_dev); + ubcore_log_err("couldn't create device %s, ret:%d.\n", + UBCORE_DEVICE_NAME, ret); + g_ubcore_ctx.ubcore_dev = NULL; + goto del_cdev; + } + ubcore_log_info("ubcore device created success.\n"); + return 0; + +del_cdev: + cdev_del(&g_ubcore_ctx.ubcore_cdev); +unreg_cdev_region: + unregister_chrdev_region(g_ubcore_ctx.ubcore_devno, 1); + return ret; +} + +int ubcore_cdev_unregister(void) +{ + // If sysfs is not created, return Success + // Need to add mutex + if (IS_ERR_OR_NULL(g_ubcore_ctx.ubcore_dev)) + return 0; + + device_destroy(&g_ubcore_class, g_ubcore_ctx.ubcore_cdev.dev); + cdev_del(&g_ubcore_ctx.ubcore_cdev); + unregister_chrdev_region(g_ubcore_ctx.ubcore_devno, 1); + ubcore_log_info("ubcore sysfs device destroyed success.\n"); + return 0; +} + +typedef int (*ubcore_device_handler)(void); + +int ubcore_register_device(struct ubcore_device *dev) +{ + struct ubcore_device *find_dev = NULL; + int ret; + + if (dev == NULL || dev->ops == NULL || + strnlen(dev->dev_name, UBCORE_MAX_DEV_NAME) == 0 || + strnlen(dev->dev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + find_dev = ubcore_find_device_with_name(dev->dev_name); + if (find_dev != NULL) { + ubcore_log_err("Duplicate device name %s.\n", dev->dev_name); + ubcore_put_device(find_dev); + return -EEXIST; + } + + if (init_ubcore_device(dev) != 0) { + ubcore_log_err("failed to init ubcore device.\n"); + return -EINVAL; + } + ubcore_init_net_addr(dev); + + ret = ubcore_create_main_device(dev); + if (ret) { + ubcore_uninit_net_addr(dev); + uninit_ubcore_device(dev); + ubcore_log_err("create main device failed.\n"); + return ret; + } + + if (ubcore_initiate_negotiation(dev) != 0) { + ubcore_log_err("Fail to negotiate version.\n"); + ret = -1; + goto destroy_mdev; + } + + if (ubcore_config_device_in_register(dev) != 0) { + ubcore_log_err("failed to config ubcore device.\n"); + ret = -EPERM; + goto destroy_mdev; + } + ubcore_cgroup_reg_dev(dev); + + down_write(&g_device_rwsem); + ubcore_clients_add(dev); + ret = ubcore_copy_logic_devices(dev); + if (ret) { 
+ ubcore_clients_remove(dev); + up_write(&g_device_rwsem); + + ubcore_log_err("copy logic device failed, device:%s.\n", + dev->dev_name); + goto err; + } + + list_add_tail(&dev->list_node, &g_device_list); + up_write(&g_device_rwsem); + + ubcore_log_info_rl("ubcore device: %s register success.\n", + dev->dev_name); + return 0; + +err: + ubcore_cgroup_unreg_dev(dev); +destroy_mdev: + ubcore_destroy_main_device(dev); + ubcore_uninit_net_addr(dev); + uninit_ubcore_device(dev); + return ret; +} +EXPORT_SYMBOL(ubcore_register_device); + +void ubcore_unregister_device(struct ubcore_device *dev) +{ + if (dev == NULL || strnlen(dev->dev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_warn("Invalid input dev is null ptr.\n"); + return; + } + down_write(&g_device_rwsem); + + /* Remove device from g_device_list */ + list_del(&dev->list_node); + + /* Destroy uburma device, may be scheduled. + * This should not be done within a spin_lock_irqsave + */ + up_write(&g_device_rwsem); + ubcore_clients_remove(dev); + + ubcore_flush_workqueue((int)UBCORE_DISPATCH_EVENT_WQ); + ubcore_flush_workqueue((int)UBCORE_SIP_NOTIFY_WQ); + ubcore_flush_dev_vtp_work(dev); + ubcore_session_flush(dev); + + down_read(&g_device_rwsem); + ubcore_cgroup_unreg_dev(dev); + + ubcore_remove_logic_devices(dev); + ubcore_destroy_main_device(dev); + up_read(&g_device_rwsem); + + ubcore_uninit_net_addr(dev); + ubcore_free_dev_nl_sessions(dev); + + /* Pair with set use_cnt = 1 when init device */ + ubcore_put_device(dev); + /* Wait for use cnt == 0 */ + wait_for_completion(&dev->comp); + uninit_ubcore_device( + dev); /* Protect eid table access security based on ref cnt */ + + ubcore_log_info_rl("ubcore device: %s unregister success.\n", + dev->dev_name); +} +EXPORT_SYMBOL(ubcore_unregister_device); + +void ubcore_stop_requests(struct ubcore_device *dev) +{ + struct ubcore_client_ctx *ctx, *tmp; + + if (dev == NULL || strnlen(dev->dev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("Invalid parameter"); + return; + } + down_read(&dev->client_ctx_rwsem); + list_for_each_entry_safe(ctx, tmp, &dev->client_ctx_list, list_node) { + if (ctx->client && ctx->client->stop) + ctx->client->stop(dev, ctx->data); + } + up_read(&dev->client_ctx_rwsem); + ubcore_log_info("ubcore device: %s stop success.\n", dev->dev_name); +} +EXPORT_SYMBOL(ubcore_stop_requests); + +void ubcore_register_event_handler(struct ubcore_device *dev, + struct ubcore_event_handler *handler) +{ + if (dev == NULL || handler == NULL) { + ubcore_log_err("Invalid argument.\n"); + return; + } + + down_write(&dev->event_handler_rwsem); + list_add_tail(&handler->node, &dev->event_handler_list); + up_write(&dev->event_handler_rwsem); +} +EXPORT_SYMBOL(ubcore_register_event_handler); + +static void ubcore_dispatch_event_clients(struct ubcore_event *event) +{ + struct ubcore_event_handler *handler; + struct ubcore_device *dev = event->ub_dev; + + down_read(&dev->event_handler_rwsem); + list_for_each_entry(handler, &dev->event_handler_list, node) + handler->event_callback(event, handler); + up_read(&dev->event_handler_rwsem); +} + +static void ubcore_dispatch_event_task(struct work_struct *work) +{ + struct ubcore_event_work *l_ubcore_event = + container_of(work, struct ubcore_event_work, work); + + ubcore_dispatch_event_clients(&l_ubcore_event->event); + kfree(l_ubcore_event); +} + +int ubcore_dispatch_event(struct ubcore_event *event) +{ + struct ubcore_event_work *l_ubcore_event; + + l_ubcore_event = kzalloc(sizeof(*l_ubcore_event), 
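From a device driver's point of view, the registration pair above reduces to the sketch below; it assumes the driver has already populated dev->dev_name and dev->ops (including query_device_attr), since ubcore_register_device() rejects devices without them.

/* Hypothetical driver probe/remove hooks, for illustration only. */
static int example_driver_probe(struct ubcore_device *dev)
{
	/* dev->dev_name and dev->ops must be filled in before this call */
	return ubcore_register_device(dev);
}

static void example_driver_remove(struct ubcore_device *dev)
{
	/* blocks until all references taken via ubcore_get_device() are put */
	ubcore_unregister_device(dev);
}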
GFP_ATOMIC); + if (!l_ubcore_event) + return -ENOMEM; + + INIT_WORK(&l_ubcore_event->work, ubcore_dispatch_event_task); + l_ubcore_event->event = *event; + + if (ubcore_queue_work((int)UBCORE_DISPATCH_EVENT_WQ, + &l_ubcore_event->work) != 0) { + kfree(l_ubcore_event); + ubcore_log_err("Queue work failed"); + } + + return 0; +} + +void ubcore_unregister_event_handler(struct ubcore_device *dev, + struct ubcore_event_handler *handler) +{ + if (dev == NULL || handler == NULL) { + ubcore_log_err("Invalid argument.\n"); + return; + } + + down_write(&dev->event_handler_rwsem); + list_del(&handler->node); + up_write(&dev->event_handler_rwsem); +} +EXPORT_SYMBOL(ubcore_unregister_event_handler); + +static bool ubcore_preprocess_event(struct ubcore_event *event) +{ + if (event->event_type == UBCORE_EVENT_TP_ERR && + event->element.tp != NULL) { + ubcore_log_info("ubcore detect tp error event with tpn %u", + event->element.tp->tpn); + if (event->ub_dev->transport_type == UBCORE_TRANSPORT_UB) { + if (event->element.tp->state == UBCORE_TP_STATE_ERR || + event->element.tp->state == UBCORE_TP_STATE_RESET) { + ubcore_log_warn( + "Tp %u already in state %d, ignore err event", + event->element.tp->tpn, + (int32_t)event->element.tp->state); + return true; + } + + if (ubcore_change_tp_to_err(event->ub_dev, + event->element.tp) != 0) + ubcore_log_info( + "ubcore change tp %u to error failed", + event->element.tp->tpn); + } + return true; + } else if (event->event_type == UBCORE_EVENT_TP_SUSPEND && + event->element.tp != NULL) { + ubcore_log_info("ubcore detect tp %u suspend event", + event->element.tp->tpn); + ubcore_report_tp_suspend(event->ub_dev, event->element.tp); + return true; + } else if (event->event_type == UBCORE_EVENT_MIGRATE_VTP_SWITCH && + event->element.vtp != NULL) { + ubcore_log_info("ubcore detect migrate vtp %u switch event", + event->element.vtp->cfg.vtpn); + ubcore_report_migrate_vtp(event->ub_dev, event->element.vtp, + UBCORE_EVENT_MIGRATE_VTP_SWITCH); + return true; + } else if (event->event_type == UBCORE_EVENT_MIGRATE_VTP_ROLLBACK && + event->element.vtp != NULL) { + ubcore_log_info("ubcore detect migrate vtp %u rollback event", + event->element.vtp->cfg.vtpn); + ubcore_report_migrate_vtp(event->ub_dev, event->element.vtp, + UBCORE_EVENT_MIGRATE_VTP_ROLLBACK); + return true; + } else if (event->event_type == UBCORE_EVENT_TP_FLUSH_DONE && + event->element.tp != NULL) { + /* Scenarios of flush done + * 1. tp err: ubcore informs lower layer change tp to ERR when tp err. + * It triggers udma to flush after udma change tp to ERR. + * udma reports flush done event after flush done. + * 2. qpc err: udma senses qpc err, flushes and reports flush done event. + * tp has not been changed to ERR in this case. + * ubcore needs to change tp to ERR before change it to RESET. 
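A sketch of how a client consumes the asynchronous event path above, assuming event_callback returns void (its return value is ignored by ubcore_dispatch_event_clients()); the handler below only logs TP error events.

static void example_event_cb(struct ubcore_event *event,
			     struct ubcore_event_handler *handler)
{
	if (event->event_type == UBCORE_EVENT_TP_ERR &&
	    event->element.tp != NULL)
		pr_warn("tp %u reported an error on %s\n",
			event->element.tp->tpn, event->ub_dev->dev_name);
}

static struct ubcore_event_handler example_event_handler = {
	.event_callback = example_event_cb,
};

/* after the device is registered:
 *	ubcore_register_event_handler(dev, &example_event_handler);
 * before it goes away:
 *	ubcore_unregister_event_handler(dev, &example_event_handler);
 */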
+ */ + ubcore_log_info("ubcore detect tp %u flush done event", + event->element.tp->tpn); + if (event->element.tp->state == UBCORE_TP_STATE_RESET) { + ubcore_log_warn( + "Tp %u already in state %d, ignore flush done event", + event->element.tp->tpn, + (int32_t)event->element.tp->state); + return true; + } + if (event->ub_dev->transport_type == UBCORE_TRANSPORT_UB) + ubcore_report_tp_flush_done(event->ub_dev, + event->element.tp); + return true; + } + return false; +} + +void ubcore_dispatch_async_event(struct ubcore_event *event) +{ + if (event == NULL || event->ub_dev == NULL) { + ubcore_log_err("Invalid argument.\n"); + return; + } + + if (ubcore_preprocess_event(event)) + return; + + if (ubcore_dispatch_event(event) != 0) + ubcore_log_err("ubcore_dispatch_event failed"); +} +EXPORT_SYMBOL(ubcore_dispatch_async_event); + +bool ubcore_eid_valid(struct ubcore_device *dev, uint32_t eid_index, + struct ubcore_udata *udata) +{ + /* For user space */ + if (udata != NULL) { + /* uctx must be set */ + if (udata->uctx == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return false; + } + + /* compare uctx->eid_index with the given eid_index */ + if (udata->uctx->eid_index != eid_index) { + ubcore_log_err( + "eid_indx: %u is consistent with the eid_indx: %u in uctx.\n", + eid_index, udata->uctx->eid_index); + return false; + } + } else { + /* For kernel space */ + /* Check if given eid_idx exists without checking ns, + * as the current->nsproxy->net_ns can be changed. + */ + if (eid_index >= dev->eid_table.eid_cnt) { + ubcore_log_err("eid_indx: %u is over the up limit: %u", + eid_index, dev->eid_table.eid_cnt); + return false; + } + + spin_lock(&dev->eid_table.lock); + if (IS_ERR_OR_NULL(dev->eid_table.eid_entries)) { + spin_unlock(&dev->eid_table.lock); + return false; + } + if (!dev->eid_table.eid_entries[eid_index].valid) { + spin_unlock(&dev->eid_table.lock); + return false; + } + spin_unlock(&dev->eid_table.lock); + } + return true; +} + +bool ubcore_eid_accessible(struct ubcore_device *dev, uint32_t eid_index) +{ + struct net *net; + + if (eid_index >= dev->eid_table.eid_cnt) { + ubcore_log_err("eid_indx: %u is over the up limit: %u", + eid_index, dev->eid_table.eid_cnt); + return false; + } + + spin_lock(&dev->eid_table.lock); + if (IS_ERR_OR_NULL(dev->eid_table.eid_entries)) { + spin_unlock(&dev->eid_table.lock); + return false; + } + + if (!dev->eid_table.eid_entries[eid_index].valid) { + spin_unlock(&dev->eid_table.lock); + return false; + } + net = dev->eid_table.eid_entries[eid_index].net; + spin_unlock(&dev->eid_table.lock); + return net_eq(net, current->nsproxy->net_ns); +} + +void ubcore_clear_pattern1_eid(struct ubcore_device *dev, union ubcore_eid *eid) +{ + struct ubcore_ueid_cfg cfg; + uint32_t eid_idx = 0; + + if (ubcore_update_eidtbl_by_eid(dev, eid, &eid_idx, false, NULL) != 0) + return; + + cfg.eid = *eid; + cfg.eid_index = eid_idx; + cfg.upi = 0; + (void)ubcore_delete_ueid(dev, dev->attr.ue_idx, &cfg); +} + +void ubcore_clear_pattern3_eid(struct ubcore_device *dev, union ubcore_eid *eid) +{ + struct ubcore_ueid_cfg cfg; + uint32_t pattern3_upi = 0; + uint32_t eid_idx = 0; + + if (ubcore_update_eidtbl_by_eid(dev, eid, &eid_idx, false, NULL) != 0) + return; + + if (dev->attr.virtualization || + ubcore_find_upi_with_dev_name(dev->dev_name, &pattern3_upi) != 0) + return; + + if (pattern3_upi != (uint32_t)UCBORE_INVALID_UPI) { + cfg.eid = *eid; + cfg.eid_index = eid_idx; + cfg.upi = pattern3_upi; + (void)ubcore_delete_ueid(dev, dev->attr.ue_idx, &cfg); + } else { + 
ubcore_log_err("upi not configured\n"); + } +} + +int ubcore_process_mue_update_eid_tbl_notify_msg(struct ubcore_device *dev, + struct ubcore_resp *resp) +{ + struct ubcore_update_eid_tbl_notify *msg_notify = + (struct ubcore_update_eid_tbl_notify *)resp->data; + struct ubcore_event event = { 0 }; + struct net *net = &init_net; + union ubcore_eid eid; + int ret = 0; + uint32_t i; + + if (dev->eid_table.eid_entries == NULL) { + ubcore_log_err("eid_table is NULL\n"); + ret = -EINVAL; + goto free_resp; + } + + // If in dynamic eid mode, flush it and change to static eid mode. + if (dev->dynamic_eid) { + event.ub_dev = dev; + event.event_type = UBCORE_EVENT_EID_CHANGE; + for (i = 0; i < dev->attr.dev_cap.max_eid_cnt; i++) { + if (dev->eid_table.eid_entries[i].valid == true) { + eid = dev->eid_table.eid_entries[i].eid; + if (dev->attr.pattern == + (uint8_t)UBCORE_PATTERN_1) + ubcore_clear_pattern1_eid(dev, &eid); + else + ubcore_clear_pattern3_eid(dev, &eid); + event.element.eid_idx = i; + ubcore_dispatch_async_event(&event); + } + } + dev->dynamic_eid = false; + } + + if (msg_notify->is_alloc_eid) + net = read_pnet(&dev->ldev.net); + + if (ubcore_update_eidtbl_by_idx(dev, &msg_notify->eid, + msg_notify->eid_idx, + msg_notify->is_alloc_eid, net) != 0) { + ubcore_log_err("ubcore_update_eidtbl_by_idx fail\n"); + ret = -1; + goto free_resp; + } + +free_resp: + kfree(resp); + return ret; +} + +bool ubcore_dev_accessible(struct ubcore_device *dev, struct net *net) +{ + return (g_shared_ns || net_eq(net, read_pnet(&dev->ldev.net))); +} + +struct ubcore_ucontext * +ubcore_alloc_ucontext(struct ubcore_device *dev, uint32_t eid_index, + struct ubcore_udrv_priv *udrv_data) +{ + struct ubcore_ucontext *ucontext; + struct ubcore_cg_object cg_obj; + int ret; + + if (dev == NULL || + strnlen(dev->dev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME || + dev->ops == NULL || dev->ops->alloc_ucontext == NULL || + eid_index >= UBCORE_MAX_EID_CNT) { + ubcore_log_err("Invalid argument.\n"); + return ERR_PTR(-EINVAL); + } + + if (!ubcore_dev_accessible(dev, current->nsproxy->net_ns) || + !ubcore_eid_accessible(dev, eid_index)) { + ubcore_log_err("eid is not accessible by current ns.\n"); + return ERR_PTR(-EPERM); + } + + ret = ubcore_cgroup_try_charge(&cg_obj, dev, + UBCORE_RESOURCE_HCA_HANDLE); + if (ret != 0) { + ubcore_log_err("cgroup charge fail:%d ,dev_name :%s\n", ret, + dev->dev_name); + return ERR_PTR(ret); + } + + ucontext = dev->ops->alloc_ucontext(dev, eid_index, udrv_data); + if (IS_ERR_OR_NULL(ucontext)) { + ubcore_log_err("failed to alloc ucontext.\n"); + ubcore_cgroup_uncharge(&cg_obj, dev, + UBCORE_RESOURCE_HCA_HANDLE); + return UBCORE_CHECK_RETURN_ERR_PTR(ucontext, ENOEXEC); + } + + ucontext->eid_index = eid_index; + ucontext->ub_dev = dev; + ucontext->cg_obj = cg_obj; + + return ucontext; +} +EXPORT_SYMBOL(ubcore_alloc_ucontext); + +void ubcore_free_ucontext(struct ubcore_device *dev, + struct ubcore_ucontext *ucontext) +{ + int ret; + struct ubcore_cg_object cg_obj; + + ubcore_log_info("Start free ucontext, dev ptr: %p, ucontext ptr: %p.\n", + dev, ucontext); + if (dev == NULL || ucontext == NULL || dev->ops == NULL || + dev->ops->free_ucontext == NULL) { + if (dev != NULL) { + ubcore_log_info("dev->ops ptr: %p.\n", dev->ops); + if (dev->ops != NULL) + ubcore_log_info( + "dev->ops->free_ucontext ptr: %p.\n", + dev->ops->free_ucontext); + } + ubcore_log_err("Invalid argument.\n"); + return; + } + cg_obj = ucontext->cg_obj; + + ret = dev->ops->free_ucontext(ucontext); + if (ret != 0) + 
ubcore_log_err("failed to free_adu, ret: %d.\n", ret); + + ubcore_cgroup_uncharge(&cg_obj, dev, UBCORE_RESOURCE_HCA_HANDLE); +} +EXPORT_SYMBOL(ubcore_free_ucontext); + +int ubcore_add_ueid(struct ubcore_device *dev, uint16_t ue_idx, + struct ubcore_ueid_cfg *cfg) +{ + int ret; + + if (dev == NULL || cfg == NULL || dev->ops == NULL || + ue_idx >= UBCORE_MAX_UE_CNT) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + if (dev->ops->add_ueid == NULL) + return 0; + + ret = dev->ops->add_ueid(dev, ue_idx, cfg); + if (ret != 0) + ubcore_log_err("failed to add ueid, ue_idx:%hu, eid:" EID_FMT + ", upi:%u, eid_idx:%u, ret:%d\n", + ue_idx, EID_ARGS(cfg->eid), cfg->upi, + cfg->eid_index, ret); + else + ubcore_log_info("success to add ueid, ue_idx:%hu, eid:" EID_FMT + ", upi:%u, eid_idx:%u, ret:%d\n", + ue_idx, EID_ARGS(cfg->eid), cfg->upi, + cfg->eid_index, ret); + + return ret; +} +EXPORT_SYMBOL(ubcore_add_ueid); + +int ubcore_delete_ueid(struct ubcore_device *dev, uint16_t ue_idx, + struct ubcore_ueid_cfg *cfg) +{ + int ret; + + if (dev == NULL || cfg == NULL || dev->ops == NULL || + ue_idx >= UBCORE_MAX_UE_CNT) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + if (dev->ops->delete_ueid == NULL) + return 0; + + ret = dev->ops->delete_ueid(dev, ue_idx, cfg); + if (ret != 0) + ubcore_log_err("failed to del ueid, ue_idx:%hu, eid:" EID_FMT + ", upi:%u, eid_idx:%u, ret:%d\n", + ue_idx, EID_ARGS(cfg->eid), cfg->upi, + cfg->eid_index, ret); + else + ubcore_log_info("success to del ueid, ue_idx:%hu, eid:" EID_FMT + ", upi:%u, eid_idx:%u, ret:%d\n", + ue_idx, EID_ARGS(cfg->eid), cfg->upi, + cfg->eid_index, ret); + + return ret; +} +EXPORT_SYMBOL(ubcore_delete_ueid); + +int ubcore_query_device_attr(struct ubcore_device *dev, + struct ubcore_device_attr *attr) +{ + int ret; + + if (dev == NULL || attr == NULL || dev->ops == NULL || + dev->ops->query_device_attr == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->query_device_attr(dev, attr); + if (ret != 0) { + ubcore_log_err("failed to query device attr, ret: %d.\n", ret); + return -EPERM; + } + return 0; +} +EXPORT_SYMBOL(ubcore_query_device_attr); + +int ubcore_query_device_status(struct ubcore_device *dev, + struct ubcore_device_status *status) +{ + int ret; + + if (dev == NULL || status == NULL || dev->ops == NULL || + dev->ops->query_device_status == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->query_device_status(dev, status); + if (ret != 0) { + ubcore_log_err("failed to query device status, ret: %d.\n", + ret); + return -EPERM; + } + return 0; +} +EXPORT_SYMBOL(ubcore_query_device_status); + +int ubcore_query_resource(struct ubcore_device *dev, struct ubcore_res_key *key, + struct ubcore_res_val *val) +{ + int ret; + + if (dev == NULL || key == NULL || val == NULL || dev->ops == NULL || + dev->ops->query_res == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + ret = dev->ops->query_res(dev, key, val); + if (ret != 0) { + ubcore_log_err("failed to query res, ret: %d.\n", ret); + return -EPERM; + } + return 0; +} +EXPORT_SYMBOL(ubcore_query_resource); + +int ubcore_config_device(struct ubcore_device *dev, + struct ubcore_device_cfg *cfg) +{ + int ret; + + if (dev == NULL || cfg == NULL || dev->ops == NULL || + dev->ops->config_device == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->config_device(dev, cfg); + if (ret != 0) { + ubcore_log_err("failed to config 
device, ret: %d.\n", ret); + return -EPERM; + } + return 0; +} +EXPORT_SYMBOL(ubcore_config_device); + +int ubcore_user_control(struct ubcore_device *dev, + struct ubcore_user_ctl *k_user_ctl) +{ + int ret; + + if (k_user_ctl == NULL) { + ubcore_log_err("invalid parameter with input nullptr.\n"); + return -1; + } + + if (dev == NULL || dev->ops == NULL || dev->ops->user_ctl == NULL) { + ubcore_log_err("invalid parameter with dev nullptr.\n"); + return -1; + } + + ret = dev->ops->user_ctl(dev, k_user_ctl); + if (ret != 0) { + ubcore_log_err("failed to exec kdrv_user_ctl in %s.\n", + __func__); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(ubcore_user_control); + +int ubcore_query_stats(struct ubcore_device *dev, struct ubcore_stats_key *key, + struct ubcore_stats_val *val) +{ + int ret; + + if (dev == NULL || key == NULL || val == NULL || dev->ops == NULL || + dev->ops->query_stats == NULL) { + ubcore_log_err("Invalid argument.\n"); + return -EINVAL; + } + + ret = dev->ops->query_stats(dev, key, val); + if (ret != 0) { + ubcore_log_err("Failed to query stats, ret: %d.\n", ret); + return -EPERM; + } + return 0; +} +EXPORT_SYMBOL(ubcore_query_stats); + +static int ubcore_handle_add_sip_idx(struct ubcore_uvs_instance *uvs, + struct ubcore_sip_table *sip_table, + uint32_t index) +{ + struct sip_idx_node *cur, *tmp, *new_node; + + if (uvs == NULL || uvs->policy != UBCORE_RESTORE_POLICY_CLEANUP) { + ubcore_log_info("add sip find no uvs\n"); + return 0; + } + + spin_lock(&uvs->sip_list_lock); + list_for_each_entry_safe(cur, tmp, &uvs->sip_list, node) { + if (cur->sip_idx == index) { + ubcore_log_info("find sip_idx %u in uvs %s\n", index, + uvs->name); + spin_unlock(&uvs->sip_list_lock); + return 0; + } + } + + new_node = kzalloc(sizeof(struct sip_idx_node), GFP_KERNEL); + if (new_node == NULL) { + spin_unlock(&uvs->sip_list_lock); + ubcore_log_err("failed to alloc sip_idx_node\n"); + return -ENOMEM; + } + + new_node->sip_idx = index; + new_node->sip_info = &sip_table->entry[index].sip_info; + list_add_tail(&new_node->node, &uvs->sip_list); + atomic_inc(&sip_table->entry[index].uvs_cnt); + spin_unlock(&uvs->sip_list_lock); + ubcore_log_info("add sip_idx %u to uvs %s, uvs_cnt %u\n", index, + uvs->name, + atomic_read(&sip_table->entry[index].uvs_cnt)); + return 0; +} + +static int ubcore_add_device_sip(struct ubcore_sip_info *sip, uint32_t *sip_idx) +{ + struct ubcore_device *dev; + struct ubcore_uvs_instance *uvs; + uint32_t index; + int ret = 0; + uint32_t pid; + + if (sip == NULL) { + ubcore_log_err("There is an illegal parameter.\n"); + return -1; + } + + pid = (uint32_t)task_tgid_vnr(current); + uvs = ubcore_find_get_uvs_by_pid(pid); + + dev = ubcore_find_mue_device_by_name(sip->dev_name); + if (!dev || dev->transport_type != UBCORE_TRANSPORT_UB) { + ubcore_log_err("update sip, dev:%s no mue, or not UB\n", + sip->dev_name); + return -1; + } + + if (ubcore_lookup_sip_idx(&dev->sip_table, sip, &index) == 0) { + ubcore_log_warn("sip already exists, sip_idx %u\n", index); + if (ubcore_handle_add_sip_idx(uvs, &dev->sip_table, index) != + 0) { + ubcore_log_err("failed to handle sip_idx %u\n", index); + ret = -1; + goto put_uvs; + } + ret = -EEXIST; + } else { + index = ubcore_sip_idx_alloc(&dev->sip_table); + + if (dev->ops != NULL && dev->ops->add_net_addr != NULL) { + ret = dev->ops->add_net_addr(dev, &sip->addr, index); + if (ret != 0) { + ubcore_log_err( + "Failed to set net addr, ret: %d", ret); + goto free_sip_index; + } + } + /* add net_addr entry, record idx -> netaddr mapping */ + ret = 
ubcore_add_sip_entry(&dev->sip_table, sip, index); + if (ret != 0) { + ubcore_log_err("Failed to add sip entry, sip_idx %u\n", + index); + goto del_net_addr; + } + + if (ubcore_handle_add_sip_idx(uvs, &dev->sip_table, index) != + 0) { + ubcore_log_err("failed to handle sip_idx %u\n", index); + goto del_ubcore_sip_idx; + } + } + + /* notify uvs to add sip info */ + if (ubcore_get_netlink_valid() == true) + (void)ubcore_notify_uvs_add_sip(dev, sip, index); + + *sip_idx = index; + ubcore_uvs_kref_put(uvs); + ubcore_put_device(dev); + return ret; +del_ubcore_sip_idx: + ubcore_del_sip_entry(&dev->sip_table, index); +del_net_addr: + (void)ubcore_del_net_addr(dev, index); +free_sip_index: + (void)ubcore_sip_idx_free(&dev->sip_table, index); +put_uvs: + ubcore_uvs_kref_put(uvs); + ubcore_put_device(dev); + return ret; +} + +static int ubcore_handle_del_sip_idx(struct ubcore_uvs_instance *uvs, + struct ubcore_sip_table *sip_table, + uint32_t index) +{ + struct sip_idx_node *cur, *tmp; + int ret; + + if (uvs == NULL || uvs->policy != UBCORE_RESTORE_POLICY_CLEANUP) { + ret = (int)atomic_read(&sip_table->entry[index].uvs_cnt); + ubcore_log_info("del sip find no uvs, ret %d\n", ret); + return ret; + } + + spin_lock(&uvs->sip_list_lock); + list_for_each_entry_safe(cur, tmp, &uvs->sip_list, node) { + if (cur->sip_idx == index) { + list_del(&cur->node); + kfree(cur); + ret = (int)atomic_dec_return( + &sip_table->entry[index].uvs_cnt); + spin_unlock(&uvs->sip_list_lock); + ubcore_log_info("del uvs sip_idx %u, uvs %s, ret %d\n", + index, uvs->name, ret); + return ret; + } + } + + ret = (int)atomic_read(&sip_table->entry[index].uvs_cnt); + spin_unlock(&uvs->sip_list_lock); + ubcore_log_info("sip_idx %u not found in uvs %s, ret %d\n", index, + uvs->name, ret); + return ret; +} + +static int ubcore_del_device_sip(struct ubcore_sip_info *sip) +{ + struct ubcore_device *dev; + struct ubcore_uvs_instance *uvs; + uint32_t index; + uint32_t pid; + int ret; + + if (sip == NULL) { + ubcore_log_err("There is an illegal parameter.\n"); + return -1; + } + + dev = ubcore_find_mue_device_by_name(sip->dev_name); + if (!dev || dev->transport_type != UBCORE_TRANSPORT_UB) { + ubcore_log_err("del sip, dev:%s no mue, or dev not UB\n", + sip->dev_name); + return -1; + } + + pid = (uint32_t)task_tgid_vnr(current); + uvs = ubcore_find_get_uvs_by_pid(pid); + + if (ubcore_lookup_sip_idx(&dev->sip_table, sip, &index) != 0) { + ubcore_log_err("sip does not exist\n"); + goto put_uvs; + } + + if (ubcore_handle_del_sip_idx(uvs, &dev->sip_table, index) == 0) { + ret = ubcore_del_sip_entry(&dev->sip_table, index); + /* Only delete_net_addr when sip changed from valid into invalid to avoid UAF */ + if (ret == 0 && ubcore_del_net_addr(dev, index) != 0) { + ubcore_log_err("Failed to delete net addr"); + goto add_sip_entry; + } + /* notify uvs to delete sip info */ + if (ubcore_get_netlink_valid() == true && + ubcore_notify_uvs_del_sip(dev, sip, index) != 0) + goto add_net_addr; + + (void)ubcore_sip_idx_free(&dev->sip_table, index); + } + ubcore_uvs_kref_put(uvs); + ubcore_put_device(dev); + return 0; + +add_net_addr: + if (dev->ops->add_net_addr != NULL) + dev->ops->add_net_addr(dev, &sip->addr, index); +add_sip_entry: + (void)ubcore_add_sip_entry(&dev->sip_table, sip, index); +put_uvs: + ubcore_uvs_kref_put(uvs); + ubcore_put_device(dev); + return -1; +} + +int ubcore_add_sip(struct ubcore_sip_info *sip, uint32_t *sip_idx) +{ + if (sip == NULL || sip_idx == NULL || + strnlen(sip->dev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME || +
strnlen(sip->netdev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("invalid parameter.\n"); + return -EINVAL; + } + return ubcore_add_device_sip(sip, sip_idx); +} +EXPORT_SYMBOL(ubcore_add_sip); + +int ubcore_delete_sip(struct ubcore_sip_info *sip) +{ + if (sip == NULL || strnlen(sip->dev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("invalid parameter.\n"); + return -EINVAL; + } + + return ubcore_del_device_sip(sip); +} +EXPORT_SYMBOL(ubcore_delete_sip); + +struct ubcore_eid_info *ubcore_get_eid_list(struct ubcore_device *dev, + uint32_t *cnt) +{ + struct ubcore_eid_info *eid_list; + struct ubcore_eid_info *tmp; + uint32_t count; + uint32_t i; + + if (dev == NULL || dev->attr.dev_cap.max_eid_cnt == 0 || + dev->attr.dev_cap.max_eid_cnt > UBCORE_MAX_EID_CNT || cnt == NULL || + IS_ERR_OR_NULL(dev->eid_table.eid_entries) || + strnlen(dev->dev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("invalid input parameter.\n"); + return NULL; + } + + tmp = vmalloc(dev->attr.dev_cap.max_eid_cnt * + sizeof(struct ubcore_eid_info)); + if (tmp == NULL) + return NULL; + + spin_lock(&dev->eid_table.lock); + for (i = 0, count = 0; i < dev->attr.dev_cap.max_eid_cnt; i++) { + if (dev->eid_table.eid_entries[i].valid == true) { + tmp[count].eid = dev->eid_table.eid_entries[i].eid; + tmp[count].eid_index = i; + count++; + } + } + spin_unlock(&dev->eid_table.lock); + if (count == 0) { + vfree(tmp); + ubcore_log_warn("There is no eids in device: %s eid_table.\n", + dev->dev_name); + return NULL; + } + *cnt = count; + + eid_list = vmalloc(count * sizeof(struct ubcore_eid_info)); + if (eid_list == NULL) { + vfree(tmp); + return NULL; + } + for (i = 0; i < count; i++) + eid_list[i] = tmp[i]; + + vfree(tmp); + return eid_list; +} +EXPORT_SYMBOL(ubcore_get_eid_list); + +void ubcore_free_eid_list(struct ubcore_eid_info *eid_list) +{ + if (eid_list != NULL) + vfree(eid_list); +} +EXPORT_SYMBOL(ubcore_free_eid_list); + +static int ubcore_lookup_sip_by_addr(struct ubcore_device *dev, + const union ubcore_net_addr_union *addr) +{ + struct ubcore_sip_table *sip_table = NULL; + uint32_t i; + + sip_table = &dev->sip_table; + mutex_lock(&sip_table->lock); + for (i = 0; i < sip_table->max_sip_cnt; i++) { + if (sip_table->entry[i].sip_info.is_active && + memcmp(addr, &sip_table->entry[i].sip_info.addr.net_addr, + sizeof(union ubcore_net_addr_union)) == 0) { + mutex_unlock(&sip_table->lock); + return 0; + } + } + mutex_unlock(&sip_table->lock); + + return -1; +} + +struct ubcore_device * +ubcore_lookup_mue_by_sip_addr(union ubcore_net_addr_union *addr, + enum ubcore_transport_type type) +{ + struct ubcore_device *dev = NULL, *target = NULL; + + down_read(&g_device_rwsem); + list_for_each_entry(dev, &g_device_list, list_node) { + if (dev != NULL && dev->attr.tp_maintainer && + dev->transport_type == type) { + if (ubcore_lookup_sip_by_addr(dev, addr) == 0) { + target = dev; + ubcore_get_device(dev); + break; + } + } + } + up_read(&g_device_rwsem); + + return target; +} + +static void ubcore_modify_eid_ns(struct ubcore_device *dev, struct net *net) +{ + struct ubcore_eid_entry *e; + uint32_t i; + + if (dev->eid_table.eid_entries == NULL) + return; + + spin_lock(&dev->eid_table.lock); + for (i = 0; i < dev->eid_table.eid_cnt; i++) { + e = &dev->eid_table.eid_entries[i]; + if (e->valid && !net_eq(e->net, net)) + e->net = net; + } + spin_unlock(&dev->eid_table.lock); +} + +static void ubcore_invalidate_eid_ns(struct ubcore_device *dev, struct net *net) +{ + 
struct ubcore_eid_entry *e; + uint32_t i; + + if (dev->eid_table.eid_entries == NULL) + return; + + spin_lock(&dev->eid_table.lock); + for (i = 0; i < dev->eid_table.eid_cnt; i++) { + e = &dev->eid_table.eid_entries[i]; + if (e->valid && net_eq(e->net, net)) { + e->net = &init_net; + e->valid = false; + } + } + spin_unlock(&dev->eid_table.lock); +} + +static int ubcore_modify_dev_ns(struct ubcore_device *dev, struct net *net, + bool exit) +{ + struct net *cur; + int ret; + + cur = read_pnet(&dev->ldev.net); + if (net_eq(net, cur)) + return 0; + + kobject_uevent(&dev->ldev.dev->kobj, KOBJ_REMOVE); + ubcore_clients_remove(dev); + write_pnet(&dev->ldev.net, net); + ret = device_rename(dev->ldev.dev, dev_name(dev->ldev.dev)); + if (ret) { + write_pnet(&dev->ldev.net, cur); + ubcore_log_err("Failed to rename device in the new ns.\n"); + goto out; + } + + if (exit) + ubcore_invalidate_eid_ns(dev, cur); + else + ubcore_modify_eid_ns(dev, net); + +out: + ubcore_clients_add(dev); + kobject_uevent(&dev->ldev.dev->kobj, KOBJ_ADD); + return ret; +} + +int ubcore_set_dev_ns(char *device_name, uint32_t ns_fd) +{ + struct ubcore_device *dev = NULL, *tmp; + struct net *net; + int ret = 0; + + if (g_shared_ns) { + ubcore_log_err( + "Can not set device to ns under shared ns mode.\n"); + return -EPERM; + } + + net = get_net_ns_by_fd(ns_fd); + if (IS_ERR(net)) { + ubcore_log_err("Failed to get ns by fd.\n"); + return PTR_ERR(net); + } + + /* Find device by name */ + /* device_name len checked by genl */ + down_read(&g_device_rwsem); + list_for_each_entry(tmp, &g_device_list, list_node) { + if (strcmp(dev_name(tmp->ldev.dev), device_name) == 0) { + dev = tmp; + break; + } + } + if (dev == NULL || dev->transport_type != UBCORE_TRANSPORT_UB) { + ret = -EINVAL; + ubcore_log_err("Failed to find device.\n"); + goto out; + } + + /* Put device in the new ns */ + ret = ubcore_modify_dev_ns(dev, net, false); + +out: + up_read(&g_device_rwsem); + put_net(net); + return ret; +} + +int ubcore_set_ns_mode(bool shared) +{ + unsigned long flags; + + down_write(&g_ubcore_net_rwsem); + if (g_shared_ns == shared) { + up_write(&g_ubcore_net_rwsem); + return 0; + } + spin_lock_irqsave(&g_ubcore_net_lock, flags); + if (!list_empty(&g_ubcore_net_list)) { + spin_unlock_irqrestore(&g_ubcore_net_lock, flags); + up_write(&g_ubcore_net_rwsem); + ubcore_log_err("Failed to modify ns mode with existing ns"); + return -EPERM; + } + g_shared_ns = shared; + spin_unlock_irqrestore(&g_ubcore_net_lock, flags); + up_write(&g_ubcore_net_rwsem); + return 0; +} + +void ubcore_net_exit(struct net *net) +{ + struct ubcore_net *unet = net_generic(net, g_ubcore_net_id); + struct ubcore_device *dev; + unsigned long flags; + + if (unet == NULL) + return; + + ubcore_log_info("net exit %u, net:0x%p", net->ns.inum, net); + down_write(&g_ubcore_net_rwsem); + spin_lock_irqsave(&g_ubcore_net_lock, flags); + if (list_empty(&unet->node)) { + spin_unlock_irqrestore(&g_ubcore_net_lock, flags); + up_write(&g_ubcore_net_rwsem); + return; + } + list_del_init(&unet->node); + spin_unlock_irqrestore(&g_ubcore_net_lock, flags); + up_write(&g_ubcore_net_rwsem); + + if (!g_shared_ns) { + down_read(&g_device_rwsem); + list_for_each_entry(dev, &g_device_list, list_node) { + if (dev->transport_type != UBCORE_TRANSPORT_UB || + !net_eq(read_pnet(&dev->ldev.net), net)) + continue; + (void)ubcore_modify_dev_ns(dev, &init_net, true); + } + up_read(&g_device_rwsem); + } else { + down_write(&g_device_rwsem); + list_for_each_entry(dev, &g_device_list, list_node) { + if 
(dev->transport_type != UBCORE_TRANSPORT_UB) + continue; + ubcore_remove_one_logic_device(dev, net); + ubcore_invalidate_eid_ns(dev, net); + } + up_write(&g_device_rwsem); + } +} + +static int ubcore_net_init(struct net *net) +{ + struct ubcore_net *unet = net_generic(net, g_ubcore_net_id); + struct ubcore_device *dev; + unsigned long flags; + int ret = 0; + + if (unet == NULL) + return 0; + + ubcore_log_info("net init %u, net:0x%p", net->ns.inum, net); + write_pnet(&unet->net, net); + if (net_eq(net, &init_net)) { + INIT_LIST_HEAD(&unet->node); + return 0; + } + + spin_lock_irqsave(&g_ubcore_net_lock, flags); + list_add_tail(&unet->node, &g_ubcore_net_list); + spin_unlock_irqrestore(&g_ubcore_net_lock, flags); + + if (!g_shared_ns) + return 0; + + down_read(&g_device_rwsem); + list_for_each_entry(dev, &g_device_list, list_node) { + if (dev->transport_type != UBCORE_TRANSPORT_UB) + continue; + + down_read(&g_ubcore_net_rwsem); + ret = ubcore_add_one_logic_device(dev, net); + up_read(&g_ubcore_net_rwsem); + if (ret) + break; + } + up_read(&g_device_rwsem); + if (ret) + ubcore_net_exit(net); + + /* return ret will cause error starting a container */ + return 0; +} + +int ubcore_get_max_mtu(struct ubcore_device *dev, enum ubcore_mtu *mtu) +{ + enum ubcore_mtu mtu_min = UBCORE_MTU_8192; + enum ubcore_mtu mtu_tmp; + bool found = false; + uint32_t i; + + if (dev == NULL || mtu == NULL) { + ubcore_log_info("invalid parameter"); + return -1; + } + + for (i = 0; i < UBCORE_MAX_PORT_CNT; i++) { + mtu_tmp = dev->attr.port_attr[i].max_mtu; + if ((uint32_t)mtu_tmp != 0 && mtu_tmp <= mtu_min) { + mtu_min = mtu_tmp; + found = true; + } + } + + if (!found) { + ubcore_log_err("Failed to find valid max_mtu"); + return -1; + } + + *mtu = mtu_min; + return 0; +} + +static struct pernet_operations g_ubcore_net_ops = { + .init = ubcore_net_init, + .exit = ubcore_net_exit, + .id = &g_ubcore_net_id, + .size = sizeof(struct ubcore_net) +}; + +int ubcore_register_pnet_ops(void) +{ + return register_pernet_device(&g_ubcore_net_ops); +} +void ubcore_unregister_pnet_ops(void) +{ + unregister_pernet_device(&g_ubcore_net_ops); +} + +int ubcore_class_register(void) +{ + int ret; + + // Allocate device numbers for MUE + ret = alloc_chrdev_region(&g_dynamic_mue_devnum, 0, UBCORE_MAX_MUE_NUM, + UBCORE_DEVICE_NAME); + if (ret != 0) { + ubcore_log_err( + "couldn't register dynamic device number for mue.\n"); + return ret; + } + + ret = class_register(&g_ubcore_class); + if (ret) { + unregister_chrdev_region(g_dynamic_mue_devnum, + UBCORE_MAX_MUE_NUM); + ubcore_log_err("couldn't create ubcore class\n"); + } + return ret; +} + +void ubcore_class_unregister(void) +{ + class_unregister(&g_ubcore_class); + unregister_chrdev_region(g_dynamic_mue_devnum, UBCORE_MAX_MUE_NUM); +} + +void ubcore_dispatch_mgmt_event(struct ubcore_mgmt_event *event) +{ + struct ubcore_eid_info *eid_info; + struct net *net = &init_net; + int ret; + + if (event == NULL || event->ub_dev == NULL || + event->element.eid_info == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return; + } + eid_info = event->element.eid_info; + + switch (event->event_type) { + case UBCORE_MGMT_EVENT_EID_ADD: + net = read_pnet(&event->ub_dev->ldev.net); + ret = ubcore_update_eidtbl_by_idx(event->ub_dev, &eid_info->eid, + eid_info->eid_index, true, + net); + break; + case UBCORE_MGMT_EVENT_EID_RMV: + ret = ubcore_update_eidtbl_by_idx(event->ub_dev, &eid_info->eid, + eid_info->eid_index, false, + net); + break; + default: + ubcore_log_err("Invalid event_type: %d.\n", 
event->event_type); + return; + } + + if (ret != 0) + ubcore_log_err( + "Failed to update eid table, index: %u, type: %d.\n", + eid_info->eid_index, event->event_type); + + if (eid_info->eid_index == 0 && + ubcore_call_cm_eid_ops(event->ub_dev, event->element.eid_info, + event->event_type) != 0) + ubcore_log_err("cast eid to ubcm failed.\n"); +} +EXPORT_SYMBOL(ubcore_dispatch_mgmt_event); + +struct ubcore_device *ubcore_get_device_by_eid(union ubcore_eid *eid, + enum ubcore_transport_type type) +{ + struct ubcore_device *dev, *target = NULL; + uint32_t idx; + + if (eid == NULL || type >= UBCORE_TRANSPORT_MAX) { + ubcore_log_err("Invalid parameter.\n"); + return NULL; + } + + down_read(&g_device_rwsem); + list_for_each_entry(dev, &g_device_list, list_node) { + if (IS_ERR_OR_NULL(dev->eid_table.eid_entries)) + continue; + for (idx = 0; idx < dev->attr.dev_cap.max_eid_cnt; idx++) { + if (memcmp(&dev->eid_table.eid_entries[idx].eid, eid, + sizeof(union ubcore_eid)) == 0 && + dev->transport_type == type) { + target = dev; + break; + } + } + if (target != NULL) + break; + } + up_read(&g_device_rwsem); + return target; +} +EXPORT_SYMBOL(ubcore_get_device_by_eid); diff --git a/drivers/ub/urma/ubcore/ubcore_device.h b/drivers/ub/urma/ubcore/ubcore_device.h new file mode 100644 index 000000000000..80fdc11154ef --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_device.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
 + * + * Description: ubcore device header file + * Author: Yan Fangfang + * Create: 2024-02-05 + * Note: + * History: 2024-02-05: Create file + */ + +#ifndef UBCORE_DEVICE_H +#define UBCORE_DEVICE_H + +#include "ubcore_priv.h" + +#define UBCORE_DEVNODE_MODE (0666) + +int ubcore_register_pnet_ops(void); +void ubcore_unregister_pnet_ops(void); +int ubcore_class_register(void); +void ubcore_class_unregister(void); +int ubcore_cdev_register(void); +int ubcore_cdev_unregister(void); +int ubcore_set_ns_mode(bool shared); +int ubcore_set_dev_ns(char *device_name, uint32_t ns_fd); +bool ubcore_dev_accessible(struct ubcore_device *dev, struct net *net); +int ubcore_get_max_mtu(struct ubcore_device *dev, enum ubcore_mtu *mtu); +struct ubcore_nlmsg *ubcore_new_mue_dev_msg(struct ubcore_device *dev); +/* Only valid for user space */ +bool ubcore_eid_accessible(struct ubcore_device *dev, uint32_t eid_index); +/* Valid for both user space and kernel space */ +bool ubcore_eid_valid(struct ubcore_device *dev, uint32_t eid_index, + struct ubcore_udata *udata); +int ubcore_config_rsvd_jetty(struct ubcore_device *dev, uint32_t min_jetty_id, + uint32_t max_jetty_id); + +int ubcore_process_mue_update_eid_tbl_notify_msg(struct ubcore_device *dev, + struct ubcore_resp *resp); +void ubcore_clear_pattern1_eid(struct ubcore_device *dev, + union ubcore_eid *eid); +void ubcore_clear_pattern3_eid(struct ubcore_device *dev, + union ubcore_eid *eid); +int ubcore_delete_sip(struct ubcore_sip_info *sip); +void ubcore_uvs_release_sip_list(struct ubcore_uvs_instance *uvs); + +static inline bool ubcore_check_ctrlplane(struct ubcore_device *dev) +{ + if ((dev != NULL) && (dev->ops != NULL) && + (dev->ops->get_tp_list != NULL)) + return true; + + return false; +} + +#endif // UBCORE_DEVICE_H diff --git a/drivers/ub/urma/ubcore/ubcore_dp.c b/drivers/ub/urma/ubcore/ubcore_dp.c new file mode 100644 index 000000000000..1f357e0b21bb --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_dp.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: kmod ub data path API + * Author: sunfang + * Create: 2023-05-09 + * Note: + * History: 2023-05-09 + */ +#include "ubcore_log.h" +#include +#include +#include + +int ubcore_post_jetty_send_wr(struct ubcore_jetty *jetty, + struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr) +{ + struct ubcore_ops *dev_ops; + + if (jetty == NULL || jetty->ub_dev == NULL || + jetty->ub_dev->ops == NULL || + jetty->ub_dev->ops->post_jetty_send_wr == NULL || wr == NULL || + bad_wr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jetty->ub_dev->ops; + return dev_ops->post_jetty_send_wr(jetty, wr, bad_wr); +} +EXPORT_SYMBOL(ubcore_post_jetty_send_wr); + +int ubcore_post_jetty_recv_wr(struct ubcore_jetty *jetty, + struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr) +{ + struct ubcore_ops *dev_ops; + + if (jetty == NULL || jetty->ub_dev == NULL || + jetty->ub_dev->ops == NULL || + jetty->ub_dev->ops->post_jetty_recv_wr == NULL || wr == NULL || + bad_wr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jetty->ub_dev->ops; + return dev_ops->post_jetty_recv_wr(jetty, wr, bad_wr); +} +EXPORT_SYMBOL(ubcore_post_jetty_recv_wr); + +int ubcore_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr) +{ + struct ubcore_ops *dev_ops; + + if (jfs == NULL || jfs->ub_dev == NULL || jfs->ub_dev->ops == NULL || + jfs->ub_dev->ops->post_jfs_wr == NULL || wr == NULL || + bad_wr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfs->ub_dev->ops; + return dev_ops->post_jfs_wr(jfs, wr, bad_wr); +} +EXPORT_SYMBOL(ubcore_post_jfs_wr); + +int ubcore_post_jfr_wr(struct ubcore_jfr *jfr, struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr) +{ + struct ubcore_ops *dev_ops; + + if (jfr == NULL || jfr->ub_dev == NULL || jfr->ub_dev->ops == NULL || + jfr->ub_dev->ops->post_jfr_wr == NULL || wr == NULL || + bad_wr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfr->ub_dev->ops; + return dev_ops->post_jfr_wr(jfr, wr, bad_wr); +} +EXPORT_SYMBOL(ubcore_post_jfr_wr); + +int ubcore_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr) +{ + struct ubcore_ops *dev_ops; + + if (jfc == NULL || jfc->ub_dev == NULL || jfc->ub_dev->ops == NULL || + jfc->ub_dev->ops->poll_jfc == NULL || cr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfc->ub_dev->ops; + return dev_ops->poll_jfc(jfc, cr_cnt, cr); +} +EXPORT_SYMBOL(ubcore_poll_jfc); + +int ubcore_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only) +{ + struct ubcore_ops *dev_ops; + + if (jfc == NULL || jfc->ub_dev == NULL || jfc->ub_dev->ops == NULL || + jfc->ub_dev->ops->rearm_jfc == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfc->ub_dev->ops; + return dev_ops->rearm_jfc(jfc, solicited_only); +} +EXPORT_SYMBOL(ubcore_rearm_jfc); diff --git a/drivers/ub/urma/ubcore/ubcore_genl.c b/drivers/ub/urma/ubcore/ubcore_genl.c new file mode 100644 index 000000000000..4e95bff35284 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_genl.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore kernel module + * Author: Yanchao Zhao + * Create: 2024-01-18 + * Note: + * History: 2024-01-18: create file + */ + +#include +#include +#include +#include +#include +#include + +#include +#include "ubcore_msg.h" +#include "ubcore_cmd.h" +#include "ubcore_device.h" +#include "ubcore_genl_admin.h" +#include "ubcore_genl_define.h" +#include "ubcore_netlink.h" +#include "ubcore_genl.h" + +static const struct nla_policy ubcore_policy[NUM_UBCORE_ATTR] = { + [UBCORE_ATTR_UNSPEC] = { 0 }, + [UBCORE_HDR_COMMAND] = { .type = NLA_U32 }, + [UBCORE_HDR_ARGS_LEN] = { .type = NLA_U32 }, + [UBCORE_HDR_ARGS_ADDR] = { .type = NLA_U64 }, + [UBCORE_ATTR_NS_MODE] = { .type = NLA_U8 }, + [UBCORE_ATTR_DEV_NAME] = { .type = NLA_STRING, + .len = UBCORE_MAX_DEV_NAME - 1 }, + [UBCORE_ATTR_NS_FD] = { .type = NLA_U32 }, + [UBCORE_MSG_SEQ] = { .type = NLA_U32 }, + [UBCORE_MSG_TYPE] = { .type = NLA_U32 }, + [UBCORE_TRANSPORT_TYPE] = { .type = NLA_U32 }, + [UBORE_SRC_ID] = { .len = UBCORE_EID_SIZE }, + [UBORE_DST_ID] = { .len = UBCORE_EID_SIZE }, + [UBCORE_PAYLOAD_DATA] = { .type = NLA_BINARY }, + [UBCORE_UPDATE_EID_RET] = { .type = NLA_S32 } +}; + +static const struct genl_ops ubcore_genl_ops[] = { + { .cmd = UBCORE_CMD_QUERY_STATS, + .policy = ubcore_policy, + .maxattr = ARRAY_SIZE(ubcore_policy) - 1, + + .doit = ubcore_query_stats_ops }, + { .cmd = UBCORE_CMD_QUERY_RES, + .policy = ubcore_policy, + .maxattr = ARRAY_SIZE(ubcore_policy) - 1, + + .start = ubcore_query_res_start, + .dumpit = ubcore_query_res_dump, + .done = ubcore_query_res_done }, + { .cmd = UBCORE_CMD_ADD_EID, + .policy = ubcore_policy, + .maxattr = ARRAY_SIZE(ubcore_policy) - 1, + + .flags = GENL_ADMIN_PERM, + .start = ubcore_add_eid_start, + .dumpit = ubcore_add_eid_dump, + .done = ubcore_add_eid_done }, + { .cmd = UBCORE_CMD_DEL_EID, + .policy = ubcore_policy, + .maxattr = ARRAY_SIZE(ubcore_policy) - 1, + + .flags = GENL_ADMIN_PERM, + .start = ubcore_delete_eid_start, + .dumpit = ubcore_delete_eid_dump, + .done = ubcore_delete_eid_done }, + { .cmd = UBCORE_CMD_SET_EID_MODE, + .policy = ubcore_policy, + .maxattr = ARRAY_SIZE(ubcore_policy) - 1, + + .flags = GENL_ADMIN_PERM, + .doit = ubcore_set_eid_mode_ops }, + { .cmd = UBCORE_CMD_SET_NS_MODE, + .policy = ubcore_policy, + .maxattr = ARRAY_SIZE(ubcore_policy) - 1, + + .flags = GENL_ADMIN_PERM, + .doit = ubcore_set_ns_mode_ops }, + { .cmd = UBCORE_CMD_SET_DEV_NS, + .policy = ubcore_policy, + .maxattr = ARRAY_SIZE(ubcore_policy) - 1, + + .flags = GENL_ADMIN_PERM, + .doit = ubcore_set_dev_ns_ops }, + { .cmd = UBCORE_CMD_GET_TOPO_INFO, + .policy = ubcore_policy, + .maxattr = ARRAY_SIZE(ubcore_policy) - 1, + + .flags = GENL_ADMIN_PERM, + .doit = ubcore_get_topo_info }, + { .cmd = UBCORE_CMD_SET_GENL_PID, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .flags = GENL_ADMIN_PERM, + .doit = ubcore_set_genl_pid_ops }, + { .cmd = UBCORE_CMD_UVS_INIT_RES, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .flags = GENL_ADMIN_PERM, + .start = ubcore_get_uvs_init_res_start, + .dumpit = ubcore_get_uvs_init_res_dump, + .done = ubcore_get_uvs_init_res_done }, + { + .cmd = UBCORE_CMD_MUE2UE_RESP, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .flags = GENL_ADMIN_PERM, + .doit = 
ubcore_mue2ue_resp_ops, + }, + { + .cmd = UBCORE_CMD_RESTORE_TP_REQ, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .flags = GENL_ADMIN_PERM, + .doit = ubcore_tp_req_ops, + }, + { + .cmd = UBCORE_CMD_QUERY_TP_RESP, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .flags = GENL_ADMIN_PERM, + .doit = ubcore_tp_resp_ops, + }, + { + .cmd = UBCORE_CMD_RESTORE_TP_RESP, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .flags = GENL_ADMIN_PERM, + .doit = ubcore_tp_resp_ops, + }, + { + .cmd = UBCORE_CMD_ADD_SIP_RESP, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .flags = GENL_ADMIN_PERM, + .doit = ubcore_tp_resp_ops, + }, + { + .cmd = UBCORE_CMD_DEL_SIP_RESP, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .flags = GENL_ADMIN_PERM, + .doit = ubcore_tp_resp_ops, + }, + { + .cmd = UBCORE_CMD_UPDATE_MUE_DEV_INFO_RESP, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .flags = GENL_ADMIN_PERM, + .doit = ubcore_update_mue_dev_info_resp_ops, + }, + { + .cmd = UBCORE_CMD_VTP_STATUS_NOTIFY, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .flags = GENL_ADMIN_PERM, + .doit = ubcore_tp2ue_vtp_status_notify_ops, + }, + { .cmd = UBCORE_CMD_MSG_ACK, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + .flags = GENL_ADMIN_PERM, + .doit = ubcore_nl_msg_ack_ops, + }, +}; + +/* ubcore family definition */ + +struct genl_family ubcore_genl_family __ro_after_init = { + .hdrsize = 0, + .name = UBCORE_GENL_FAMILY_NAME, + .version = UBCORE_GENL_FAMILY_VERSION, + .maxattr = UBCORE_ATTR_MAX, + .policy = ubcore_policy, + + .resv_start_op = UBCORE_CMD_MAX, + + .netnsok = true, + .module = THIS_MODULE, + .ops = ubcore_genl_ops, + .n_ops = ARRAY_SIZE(ubcore_genl_ops) +}; + +static int ubcore_nl_event_notifier_call(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct netlink_notify *notify = data; + + if (action != NETLINK_URELEASE || notify == NULL || + notify->protocol != NETLINK_GENERIC) + return NOTIFY_DONE; + + ubcore_unset_genl_pid_ops(notify->portid); + return NOTIFY_DONE; +} + +static struct notifier_block g_nl_notifier = { + .notifier_call = ubcore_nl_event_notifier_call, +}; + +int __init ubcore_genl_init(void) +{ + int ret; + + ret = genl_register_family(&ubcore_genl_family); + if (ret != 0) + ubcore_log_err("Failed to ubcore genl init\n"); + + return netlink_register_notifier(&g_nl_notifier); +} + +/* exit network namespace */ +void ubcore_genl_exit(void) +{ + (void)netlink_unregister_notifier(&g_nl_notifier); + (void)genl_unregister_family(&ubcore_genl_family); +} diff --git a/drivers/ub/urma/ubcore/ubcore_genl.h b/drivers/ub/urma/ubcore/ubcore_genl.h new file mode 100644 index 000000000000..f5a7476b8f5b --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_genl.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: define hash table ops + * Author: Zhao Yanchao + * Create: 2024-01-18 + * Note: + * History: 2024-01-18 Zhao Yanchao Add base code + */ + +#ifndef UBCORE_GENL_H +#define UBCORE_GENL_H + +#include +int ubcore_genl_init(void) __init; +void ubcore_genl_exit(void); + +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_genl_admin.c b/drivers/ub/urma/ubcore/ubcore_genl_admin.c new file mode 100644 index 000000000000..6dad40b0a03b --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_genl_admin.c @@ -0,0 +1,963 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore kernel module + * Author: Yanchao Zhao + * Create: 2024-01-18 + * Note: + * History: 2024-01-18: create file + */ +#include +#include +#include +#include +#include +#include +#include "ubcore_genl_define.h" +#include +#include "ubcore_msg.h" +#include +#include "ubcore_priv.h" +#include "ubcore_cmd.h" +#include "ubcore_device.h" +#include "ubcore_main.h" +#include "ubcore_genl_admin.h" + +#define CB_ARGS_DEV_BUF 0 +#define CB_ARGS_CMD_TYPE 1 +#define CB_ARGS_SART_IDX 2 +#define CB_ARGS_NEXT_TYPE 3 +#define CB_ARGS_BUF_LEN 4 +#define CB_ARGS_KEY_CNT 5 + +enum { + UBCORE_RES_TPG_TP_CNT, + UBCORE_RES_TPG_DSCP, + UBCORE_RES_TPG_TP_VAL, + UBCORE_RES_JTGRP_JETTY_CNT, + UBCORE_RES_JTGRP_JETTY_VAL, + UBCORE_RES_SEGVAL_SEG_CNT, + UBCORE_RES_SEGVAL_SEG_VAL, + UBCORE_RES_DEV_SEG_CNT, + UBCORE_RES_DEV_SEG_VAL, + UBCORE_RES_DEV_JFS_CNT, + UBCORE_RES_DEV_JFS_VAL, + UBCORE_RES_DEV_JFR_CNT, + UBCORE_RES_DEV_JFR_VAL, + UBCORE_RES_DEV_JFC_CNT, + UBCORE_RES_DEV_JFC_VAL, + UBCORE_RES_DEV_JETTY_CNT, + UBCORE_RES_DEV_JETTY_VAL, + UBCORE_RES_DEV_JTGRP_CNT, + UBCORE_RES_DEV_JTGRP_VAL, + UBCORE_RES_DEV_RC_CNT, + UBCORE_RES_DEV_RC_VAL, + UBCORE_RES_DEV_VTP_CNT, + UBCORE_RES_DEV_VTP_VAL, + UBCORE_RES_DEV_TP_CNT, + UBCORE_RES_DEV_TP_VAL, + UBCORE_RES_DEV_TPG_CNT, + UBCORE_RES_DEV_TPG_VAL, + UBCORE_RES_DEV_UTP_CNT, + UBCORE_RES_DEV_UTP_VAL, + UBCORE_RES_UPI_VAL, + UBCORE_RES_VTP_VAL, + UBCORE_RES_TP_VAL, + UBCORE_RES_UTP_VAL, + UBCORE_RES_JFS_VAL, + UBCORE_RES_JFR_VAL, + UBCORE_RES_JETTY_VAL, + UBCORE_RES_JFC_VAL, + UBCORE_RES_RC_VAL, + UBCORE_ATTR_RES_LAST +}; + +static int ubcore_parse_admin_res_cmd(struct netlink_callback *cb, void *dst, + uint32_t copy_len) +{ + struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs; + + uint64_t args_addr; + + if (!attrs[UBCORE_HDR_ARGS_LEN] || !attrs[UBCORE_HDR_ARGS_ADDR]) + return -EINVAL; + + args_addr = nla_get_u64(attrs[UBCORE_HDR_ARGS_ADDR]); + + return ubcore_copy_from_user(dst, (void __user *)(uintptr_t)args_addr, + copy_len); +} + +int ubcore_query_stats_ops(struct sk_buff *skb, struct genl_info *info) +{ + struct ubcore_cmd_query_stats arg = { 0 }; + struct ubcore_stats_com_val com_val; + struct ubcore_stats_key key = { 0 }; + struct ubcore_stats_val val; + struct ubcore_device *dev; + uint64_t args_addr; + int ret = -EINVAL; + + if (!info->attrs[UBCORE_HDR_ARGS_LEN] || + !info->attrs[UBCORE_HDR_ARGS_ADDR]) + return ret; + args_addr = 
nla_get_u64(info->attrs[UBCORE_HDR_ARGS_ADDR]); + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)args_addr, + sizeof(struct ubcore_cmd_query_stats)); + if (ret != 0) + return ret; + + arg.in.dev_name[UBCORE_MAX_DEV_NAME - 1] = '\0'; + dev = ubcore_find_device_with_name(arg.in.dev_name); + if (dev == NULL) { + ubcore_log_err("find dev failed, dev:%s, arg_in: %s.\n", + dev == NULL ? "NULL" : dev->dev_name, + arg.in.dev_name); + return -EINVAL; + } + + key.type = (uint8_t)arg.in.type; + key.key = arg.in.key; + val.addr = (uint64_t)&com_val; + val.len = (uint32_t)sizeof(struct ubcore_stats_com_val); + + ret = ubcore_query_stats(dev, &key, &val); + if (ret != 0) { + ubcore_put_device(dev); + return ret; + } + + ubcore_put_device(dev); + (void)memcpy(&arg.out, &com_val, sizeof(struct ubcore_stats_com_val)); + return ubcore_copy_to_user((void __user *)(uintptr_t)args_addr, &arg, + sizeof(struct ubcore_cmd_query_stats)); +} + +static int ubcore_update_ueid(struct netlink_callback *cb, + enum ubcore_msg_opcode op) +{ + struct ubcore_cmd_update_ueid arg; + struct ubcore_update_eid_ctx *ctx; + struct net *net = &init_net; + struct ubcore_device *dev; + int ret = -EINVAL; + struct timespec64 tv; + + ret = ubcore_parse_admin_res_cmd(cb, &arg, + sizeof(struct ubcore_cmd_update_ueid)); + if (ret) + return ret; + + arg.in.dev_name[UBCORE_MAX_DEV_NAME - 1] = '\0'; + dev = ubcore_find_device_with_name(arg.in.dev_name); + if (dev == NULL) { + ubcore_log_err("find dev_name: %s failed.\n", arg.in.dev_name); + return -EPERM; + } + + if (dev->dynamic_eid) { + ubcore_log_err( + "The dynamic mode of mue does not support eid change\n"); + ubcore_put_device(dev); + return -EPERM; + } + if (dev->attr.tp_maintainer && ubcore_get_netlink_valid() == false) { + ubcore_put_device(dev); + return -EPERM; + } + + if (arg.in.ns_fd >= 0) { + net = get_net_ns_by_fd(arg.in.ns_fd); + if (IS_ERR(net) || !ubcore_dev_accessible(dev, net)) { + ubcore_put_device(dev); + ubcore_log_err("invalid net ns.\n"); + return (int)PTR_ERR(net); + } + } else if (op == UBCORE_MSG_ALLOC_EID) { + net = read_pnet(&dev->ldev.net); + } + + ctx = kcalloc(1, sizeof(struct ubcore_update_eid_ctx), GFP_KERNEL); + if (ctx == NULL) { + ubcore_put_device(dev); + if (arg.in.ns_fd >= 0) + put_net(net); + return -ENOMEM; + } + ret = ubcore_msg_discover_eid(dev, arg.in.eid_index, op, net, ctx); + if (ret != 0) { + ubcore_put_device(dev); + if (arg.in.ns_fd >= 0) + put_net(net); + kfree(ctx); + return -EPERM; + } + + if (arg.in.ns_fd >= 0) + ctx->net = net; + ctx->dev = dev; + ktime_get_ts64(&tv); + ctx->start_ts = tv.tv_sec; + cb->args[0] = (long)ctx; + return 0; +} + +int ubcore_set_eid_mode_ops(struct sk_buff *skb, struct genl_info *info) +{ + struct ubcore_cmd_set_eid_mode arg; + struct ubcore_device *dev; + struct ubcore_event event; + union ubcore_eid eid; + uint64_t args_addr; + int ret = -EINVAL; + uint32_t i; + + if (!info->attrs[UBCORE_HDR_ARGS_LEN] || + !info->attrs[UBCORE_HDR_ARGS_ADDR]) + return ret; + args_addr = nla_get_u64(info->attrs[UBCORE_HDR_ARGS_ADDR]); + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)args_addr, + sizeof(struct ubcore_cmd_set_eid_mode)); + if (ret != 0) + return -EPERM; + + arg.in.dev_name[UBCORE_MAX_DEV_NAME - 1] = '\0'; + dev = ubcore_find_device_with_name(arg.in.dev_name); + if (dev == NULL) { + ubcore_log_err("find dev_name: %s failed.\n", arg.in.dev_name); + return -EPERM; + } + + if (dev->dynamic_eid == arg.in.eid_mode) { + ubcore_put_device(dev); + return 0; + } + if 
(IS_ERR_OR_NULL(dev->eid_table.eid_entries)) { + ubcore_put_device(dev); + return -EINVAL; + } + + /* change eid mode, need to flush eids */ + event.ub_dev = dev; + event.event_type = UBCORE_EVENT_EID_CHANGE; + for (i = 0; i < dev->attr.dev_cap.max_eid_cnt; i++) { + if (dev->eid_table.eid_entries[i].valid == true) { + eid = dev->eid_table.eid_entries[i].eid; + if (dev->attr.pattern == (uint8_t)UBCORE_PATTERN_1) + ubcore_clear_pattern1_eid(dev, &eid); + else + ubcore_clear_pattern3_eid(dev, &eid); + event.element.eid_idx = i; + ubcore_dispatch_async_event(&event); + } + } + dev->dynamic_eid = arg.in.eid_mode; + ubcore_put_device(dev); + return 0; +} + +int ubcore_set_ns_mode_ops(struct sk_buff *skb, struct genl_info *info) +{ + uint8_t ns_mode; + + if (!info->attrs[UBCORE_ATTR_NS_MODE]) + return -EINVAL; + + ns_mode = nla_get_u8(info->attrs[UBCORE_ATTR_NS_MODE]); + return ubcore_set_ns_mode((ns_mode == 0 ? false : true)); +} + +int ubcore_set_dev_ns_ops(struct sk_buff *skb, struct genl_info *info) +{ + if (!info->attrs[UBCORE_ATTR_DEV_NAME] || + !info->attrs[UBCORE_ATTR_NS_FD]) + return -EINVAL; + + return ubcore_set_dev_ns( + (char *)nla_data(info->attrs[UBCORE_ATTR_DEV_NAME]), + nla_get_u32(info->attrs[UBCORE_ATTR_NS_FD])); +} + +int ubcore_get_topo_info(struct sk_buff *skb, struct genl_info *info) +{ + struct ubcore_cmd_topo_info arg = { 0 }; + struct ubcore_topo_map *topo_map; + uint64_t args_addr; + int ret = -EINVAL; + + if (!info->attrs[UBCORE_HDR_ARGS_LEN] || + !info->attrs[UBCORE_HDR_ARGS_ADDR]) + return ret; + args_addr = nla_get_u64(info->attrs[UBCORE_HDR_ARGS_ADDR]); + ret = ubcore_copy_from_user(&arg, (void __user *)(uintptr_t)args_addr, + sizeof(struct ubcore_cmd_topo_info)); + if (ret != 0) + return -EPERM; + topo_map = ubcore_get_global_topo_map(); + if (topo_map == NULL) { + ubcore_log_err("topo map is empty!\n"); + return -1; + } + if (arg.in.node_idx >= topo_map->node_num) { + ubcore_log_err("topo map idx > node_num!\n"); + return -EINVAL; + } + + arg.out.node_num = topo_map->node_num; + (void)memcpy(&arg.out.topo_info, &topo_map->topo_infos[arg.in.node_idx], + sizeof(struct ubcore_topo_info)); + return ubcore_copy_to_user((void __user *)(uintptr_t)args_addr, &arg, + sizeof(struct ubcore_cmd_topo_info)); +} + +static void ubcore_fill_res_binary(void *res_buf, struct sk_buff *msg, + struct netlink_callback *cb, int attrtype) +{ + if (nla_put(msg, attrtype, (int)cb->args[CB_ARGS_BUF_LEN], res_buf)) + return; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_ATTR_RES_LAST; +} + +static void ubcore_fill_res_tpg(void *res_buf, struct sk_buff *msg, + struct netlink_callback *cb) +{ + uint32_t idx = (uint32_t)cb->args[CB_ARGS_SART_IDX]; + struct ubcore_res_tpg_val *tpg_val = res_buf; + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_TPG_TP_CNT) { + if (nla_put_u32(msg, UBCORE_RES_TPG_TP_CNT, tpg_val->tp_cnt)) + return; + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_TPG_DSCP; + } + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_TPG_DSCP) { + if (nla_put_u8(msg, UBCORE_RES_TPG_DSCP, tpg_val->dscp)) + return; + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_TPG_TP_VAL; + } + + for (; idx < tpg_val->tp_cnt; ++idx) { + if (nla_put_u32(msg, UBCORE_RES_TPG_TP_VAL, + tpg_val->tp_list[idx])) + return; + cb->args[CB_ARGS_SART_IDX] = idx; + } + if (idx == tpg_val->tp_cnt) + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_ATTR_RES_LAST; +} + +static void ubcore_fill_res_jtgrp(void *res_buf, struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct ubcore_res_jetty_group_val *jtgrp_val = res_buf; + uint32_t idx = 
(uint32_t)cb->args[CB_ARGS_SART_IDX]; + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_JTGRP_JETTY_CNT) { + if (nla_put_u32(msg, UBCORE_RES_JTGRP_JETTY_CNT, + jtgrp_val->jetty_cnt)) + return; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_JTGRP_JETTY_VAL; + } + + for (; idx < jtgrp_val->jetty_cnt; ++idx) { + if (nla_put_u32(msg, UBCORE_RES_JTGRP_JETTY_VAL, + jtgrp_val->jetty_list[idx])) + return; + + cb->args[CB_ARGS_SART_IDX] = idx; + } + + if (idx == jtgrp_val->jetty_cnt) + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_ATTR_RES_LAST; +} + +static void ubcore_fill_res_seg(void *res_buf, struct sk_buff *msg, + struct netlink_callback *cb) +{ + uint32_t idx = (uint32_t)cb->args[CB_ARGS_SART_IDX]; + struct ubcore_res_seg_val *seg_val = res_buf; + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_SEGVAL_SEG_CNT) { + if (nla_put_u32(msg, UBCORE_RES_SEGVAL_SEG_CNT, + seg_val->seg_cnt)) + return; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_SEGVAL_SEG_VAL; + } + for (; idx < seg_val->seg_cnt; ++idx) { + if (nla_put(msg, UBCORE_RES_SEGVAL_SEG_VAL, + sizeof(struct ubcore_seg_info), + seg_val->seg_list + idx)) + return; + + cb->args[CB_ARGS_SART_IDX] = idx; + } + + if (idx == seg_val->seg_cnt) + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_ATTR_RES_LAST; +} + +static int ubcore_fill_res_dev_ta_cnt(void *res_buf, struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct ubcore_res_dev_ta_val *dev_val = res_buf; + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_DEV_SEG_CNT) { + if (nla_put_u32(msg, UBCORE_RES_DEV_SEG_CNT, dev_val->seg_cnt)) + return -1; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_DEV_JFS_CNT; + } + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_DEV_JFS_CNT) { + if (nla_put_u32(msg, UBCORE_RES_DEV_JFS_CNT, dev_val->jfs_cnt)) + return -1; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_DEV_JFR_CNT; + } + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_DEV_JFR_CNT) { + if (nla_put_u32(msg, UBCORE_RES_DEV_JFR_CNT, dev_val->jfr_cnt)) + return -1; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_DEV_JFC_CNT; + } + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_DEV_JFC_CNT) { + if (nla_put_u32(msg, UBCORE_RES_DEV_JFC_CNT, dev_val->jfc_cnt)) + return -1; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_DEV_JETTY_CNT; + } + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_DEV_JETTY_CNT) { + if (nla_put_u32(msg, UBCORE_RES_DEV_JETTY_CNT, + dev_val->jetty_cnt)) + return -1; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_DEV_JTGRP_CNT; + } + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_DEV_JTGRP_CNT) { + if (nla_put_u32(msg, UBCORE_RES_DEV_JTGRP_CNT, + dev_val->jetty_group_cnt)) + return -1; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_DEV_RC_CNT; + } + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_DEV_RC_CNT) { + if (nla_put_u32(msg, UBCORE_RES_DEV_RC_CNT, dev_val->rc_cnt)) + return -1; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_ATTR_RES_LAST; + } + + return 0; +} + +static int ubcore_fill_res_dev_tp_cnt(void *res_buf, struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct ubcore_res_dev_tp_val *dev_val = res_buf; + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_DEV_VTP_CNT) { + if (nla_put_u32(msg, UBCORE_RES_DEV_VTP_CNT, dev_val->vtp_cnt)) + return -1; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_DEV_TP_CNT; + } + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_DEV_TP_CNT) { + if (nla_put_u32(msg, UBCORE_RES_DEV_TP_CNT, dev_val->tp_cnt)) + return -1; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_DEV_TPG_CNT; + } + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_DEV_TPG_CNT) { + if (nla_put_u32(msg, 
UBCORE_RES_DEV_TPG_CNT, dev_val->tpg_cnt)) + return -1; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_DEV_UTP_CNT; + } + + if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_RES_DEV_UTP_CNT) { + if (nla_put_u32(msg, UBCORE_RES_DEV_UTP_CNT, dev_val->utp_cnt)) + return -1; + + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_ATTR_RES_LAST; + } + + return 0; +} + +static int ubcore_fill_res(uint32_t type, void *res_buf, struct sk_buff *skb, + struct netlink_callback *cb) +{ + switch (type) { + case UBCORE_RES_KEY_TPG: + ubcore_fill_res_tpg(res_buf, skb, cb); + break; + case UBCORE_RES_KEY_JETTY_GROUP: + ubcore_fill_res_jtgrp(res_buf, skb, cb); + break; + case UBCORE_RES_KEY_SEG: + ubcore_fill_res_seg(res_buf, skb, cb); + break; + case UBCORE_RES_KEY_DEV_TA: + ubcore_fill_res_dev_ta_cnt(res_buf, skb, cb); + break; + case UBCORE_RES_KEY_DEV_TP: + ubcore_fill_res_dev_tp_cnt(res_buf, skb, cb); + break; + case UBCORE_RES_KEY_VTP: + ubcore_fill_res_binary(res_buf, skb, cb, UBCORE_RES_VTP_VAL); + break; + case UBCORE_RES_KEY_TP: + ubcore_fill_res_binary(res_buf, skb, cb, UBCORE_RES_TP_VAL); + break; + case UBCORE_RES_KEY_UTP: + ubcore_fill_res_binary(res_buf, skb, cb, UBCORE_RES_UTP_VAL); + break; + case UBCORE_RES_KEY_JFS: + ubcore_fill_res_binary(res_buf, skb, cb, UBCORE_RES_JFS_VAL); + break; + case UBCORE_RES_KEY_JFR: + ubcore_fill_res_binary(res_buf, skb, cb, UBCORE_RES_JFR_VAL); + break; + case UBCORE_RES_KEY_JETTY: + ubcore_fill_res_binary(res_buf, skb, cb, UBCORE_RES_JETTY_VAL); + break; + case UBCORE_RES_KEY_JFC: + ubcore_fill_res_binary(res_buf, skb, cb, UBCORE_RES_JFC_VAL); + break; + case UBCORE_RES_KEY_RC: + ubcore_fill_res_binary(res_buf, skb, cb, UBCORE_RES_RC_VAL); + break; + default: + ubcore_log_err("key type :%u no support.\n", type); + return -1; + } + return 0; +} + +static void ubcore_put_list_res(void *res_buf, struct sk_buff *msg, + struct netlink_callback *cb, int cnt_type, + int val_type) +{ + struct ubcore_res_list_val *reslist = res_buf; + uint32_t idx = (uint32_t)cb->args[CB_ARGS_SART_IDX]; + + if (nla_put_u32(msg, cnt_type, reslist->cnt)) + return; + + for (; idx < reslist->cnt; ++idx) { + if (nla_put_u32(msg, val_type, reslist->list[idx])) + return; + + cb->args[CB_ARGS_SART_IDX] = idx; + } + if (idx == reslist->cnt) + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_ATTR_RES_LAST; +} + +static int ubcore_fill_list_res(uint32_t type, void *res_buf, + struct sk_buff *skb, + struct netlink_callback *cb) +{ + switch (type) { + case UBCORE_RES_KEY_JETTY_GROUP: + ubcore_put_list_res(res_buf, skb, cb, + UBCORE_RES_JTGRP_JETTY_CNT, + UBCORE_RES_JTGRP_JETTY_VAL); + break; + case UBCORE_RES_KEY_SEG: + ubcore_fill_res_seg(res_buf, skb, cb); + break; + case UBCORE_RES_KEY_JFS: + ubcore_put_list_res(res_buf, skb, cb, UBCORE_RES_DEV_JFS_CNT, + UBCORE_RES_DEV_JFS_VAL); + break; + case UBCORE_RES_KEY_JFR: + ubcore_put_list_res(res_buf, skb, cb, UBCORE_RES_DEV_JFR_CNT, + UBCORE_RES_DEV_JFR_VAL); + break; + case UBCORE_RES_KEY_JETTY: + ubcore_put_list_res(res_buf, skb, cb, UBCORE_RES_DEV_JETTY_CNT, + UBCORE_RES_DEV_JETTY_VAL); + break; + case UBCORE_RES_KEY_JFC: + ubcore_put_list_res(res_buf, skb, cb, UBCORE_RES_DEV_JFC_CNT, + UBCORE_RES_DEV_JFC_VAL); + break; + case UBCORE_RES_KEY_RC: + ubcore_put_list_res(res_buf, skb, cb, UBCORE_RES_DEV_RC_CNT, + UBCORE_RES_DEV_RC_VAL); + break; + case UBCORE_RES_KEY_TPG: + ubcore_put_list_res(res_buf, skb, cb, UBCORE_RES_DEV_TPG_CNT, + UBCORE_RES_DEV_TPG_VAL); + break; + case UBCORE_RES_KEY_VTP: + ubcore_put_list_res(res_buf, skb, cb, UBCORE_RES_DEV_VTP_CNT, + 
UBCORE_RES_DEV_VTP_VAL); + break; + case UBCORE_RES_KEY_TP: + ubcore_put_list_res(res_buf, skb, cb, UBCORE_RES_DEV_TP_CNT, + UBCORE_RES_DEV_TP_VAL); + break; + case UBCORE_RES_KEY_UTP: + ubcore_put_list_res(res_buf, skb, cb, UBCORE_RES_DEV_UTP_CNT, + UBCORE_RES_DEV_UTP_VAL); + break; + case UBCORE_RES_KEY_DEV_TA: + ubcore_fill_res_dev_ta_cnt(res_buf, skb, cb); + break; + case UBCORE_RES_KEY_DEV_TP: + ubcore_fill_res_dev_tp_cnt(res_buf, skb, cb); + break; + default: + ubcore_log_err("key type :%u no support.\n", type); + return -1; + } + return 0; +} + +static uint32_t ubcore_get_query_res_len(uint32_t type, + struct netlink_callback *cb) +{ + switch (type) { + case UBCORE_RES_KEY_VTP: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_VTP_VAL; + return (uint32_t)sizeof(struct ubcore_res_vtp_val); + case UBCORE_RES_KEY_TP: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_TP_VAL; + return (uint32_t)sizeof(struct ubcore_res_tp_val); + case UBCORE_RES_KEY_TPG: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_TPG_TP_CNT; + return (uint32_t)sizeof(struct ubcore_res_tpg_val); + case UBCORE_RES_KEY_UTP: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_UTP_VAL; + return (uint32_t)sizeof(struct ubcore_res_utp_val); + case UBCORE_RES_KEY_JFS: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_JFS_VAL; + return (uint32_t)sizeof(struct ubcore_res_jfs_val); + case UBCORE_RES_KEY_JFR: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_JFR_VAL; + return (uint32_t)sizeof(struct ubcore_res_jfr_val); + case UBCORE_RES_KEY_JETTY: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_JETTY_VAL; + return (uint32_t)sizeof(struct ubcore_res_jetty_val); + case UBCORE_RES_KEY_JETTY_GROUP: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_JTGRP_JETTY_CNT; + return (uint32_t)sizeof(struct ubcore_res_jetty_group_val); + case UBCORE_RES_KEY_JFC: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_JFC_VAL; + return (uint32_t)sizeof(struct ubcore_res_jfc_val); + case UBCORE_RES_KEY_RC: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_RC_VAL; + return (uint32_t)sizeof(struct ubcore_res_rc_val); + case UBCORE_RES_KEY_SEG: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_SEGVAL_SEG_CNT; + return (uint32_t)sizeof(struct ubcore_res_seg_val); + case UBCORE_RES_KEY_DEV_TA: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_DEV_SEG_CNT; + return (uint32_t)sizeof(struct ubcore_res_dev_ta_val); + case UBCORE_RES_KEY_DEV_TP: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_DEV_VTP_CNT; + return (uint32_t)sizeof(struct ubcore_res_dev_tp_val); + default: + break; + } + return 0; +} + +static uint32_t ubcore_get_list_res_len(uint32_t type, + struct netlink_callback *cb) +{ + switch (type) { + case UBCORE_RES_KEY_VTP: + case UBCORE_RES_KEY_TP: + case UBCORE_RES_KEY_TPG: + case UBCORE_RES_KEY_UTP: + case UBCORE_RES_KEY_JFS: + case UBCORE_RES_KEY_JFR: + case UBCORE_RES_KEY_JETTY: + case UBCORE_RES_KEY_JETTY_GROUP: + case UBCORE_RES_KEY_JFC: + case UBCORE_RES_KEY_RC: + return (uint32_t)sizeof(struct ubcore_res_list_val); + case UBCORE_RES_KEY_SEG: + cb->args[CB_ARGS_NEXT_TYPE] = UBCORE_RES_SEGVAL_SEG_CNT; + return (uint32_t)sizeof(struct ubcore_res_seg_val); + default: + break; + } + return 0; +} + +static void *ubcore_query_dev_info(struct ubcore_device *dev, + struct ubcore_cmd_query_res *arg, + uint32_t res_len) +{ + struct ubcore_res_key key = { 0 }; + struct ubcore_res_val val = { 0 }; + void *res_buf; + int ret; + + res_buf = kzalloc(res_len, GFP_KERNEL); + if (res_buf == NULL) + return NULL; + + key.type = (uint8_t)arg->in.type; + key.key = arg->in.key; + key.key_ext = arg->in.key_ext; + key.key_cnt = arg->in.key_cnt; + 
val.addr = (uint64_t)res_buf; + val.len = res_len; + + // urma only allocates memory for the struct + // the driver allocates memory for the list pointer inside the struct; urma needs to vfree it later + + ret = ubcore_query_resource(dev, &key, &val); + if (ret != 0) { + kfree(res_buf); + res_buf = NULL; + } + + return res_buf; +} + +int ubcore_query_res_start(struct netlink_callback *cb) +{ + struct ubcore_cmd_query_res arg = { 0 }; + struct ubcore_device *dev; + int ret = -EINVAL; + uint32_t res_len; + void *res_buf; + + ret = ubcore_parse_admin_res_cmd(cb, &arg, + sizeof(struct ubcore_cmd_query_res)); + if (ret) + return ret; + + if (arg.in.key_cnt == 0) + res_len = ubcore_get_list_res_len((uint32_t)arg.in.type, cb); + else + res_len = ubcore_get_query_res_len((uint32_t)arg.in.type, cb); + if (res_len == 0) { + ubcore_log_err( + "Failed to check res len, type: %u, res_len: %u.\n", + (uint32_t)arg.in.type, res_len); + return -EINVAL; + } + arg.in.dev_name[UBCORE_MAX_DEV_NAME - 1] = '\0'; + dev = ubcore_find_device_with_name(arg.in.dev_name); + if (dev == NULL) { + ubcore_log_err("find dev failed, arg_in: %s.\n", + arg.in.dev_name); + return -EINVAL; + } + + if (arg.in.type == (uint32_t)UBCORE_RES_KEY_VTP && + dev->attr.virtualization == true) { + ubcore_log_warn( + "UE device does not support querying VTP, dev: %s, type: %u.\n", + dev->dev_name, arg.in.type); + ubcore_put_device(dev); + return -EINVAL; + } + + res_buf = ubcore_query_dev_info(dev, &arg, res_len); + if (!res_buf) { + ubcore_put_device(dev); + ubcore_log_err("Failed to query res by arg\n"); + return -1; + } + ubcore_put_device(dev); + cb->args[CB_ARGS_DEV_BUF] = (long)res_buf; + cb->args[CB_ARGS_CMD_TYPE] = (long)arg.in.type; + cb->args[CB_ARGS_SART_IDX] = 0; + cb->args[CB_ARGS_BUF_LEN] = res_len; + cb->args[CB_ARGS_KEY_CNT] = arg.in.key_cnt; + return 0; +} + +static int ubcore_list_res_done(struct netlink_callback *cb) +{ + uint32_t type = (uint32_t)(unsigned long)cb->args[CB_ARGS_CMD_TYPE]; + void *res_buf = (void *)cb->args[CB_ARGS_DEV_BUF]; + struct ubcore_res_seg_val *seg_val; + struct ubcore_res_list_val *list_val; + + switch (type) { + case UBCORE_RES_KEY_JFS: + case UBCORE_RES_KEY_JFR: + case UBCORE_RES_KEY_JETTY: + case UBCORE_RES_KEY_JFC: + case UBCORE_RES_KEY_RC: + case UBCORE_RES_KEY_JETTY_GROUP: + case UBCORE_RES_KEY_VTP: + case UBCORE_RES_KEY_TP: + case UBCORE_RES_KEY_TPG: + case UBCORE_RES_KEY_UTP: + list_val = res_buf; + vfree(list_val->list); + break; + case UBCORE_RES_KEY_SEG: + seg_val = res_buf; + vfree(seg_val->seg_list); + break; + default: + break; + } + kfree(res_buf); + + return 0; +} + +int ubcore_query_res_done(struct netlink_callback *cb) +{ + uint32_t type = (uint32_t)(unsigned long)cb->args[CB_ARGS_CMD_TYPE]; + void *res_buf = (void *)cb->args[CB_ARGS_DEV_BUF]; + struct ubcore_res_jetty_group_val *jtgrp_val; + struct ubcore_res_seg_val *seg_val; + struct ubcore_res_tpg_val *tpg_val; + + if (cb->args[CB_ARGS_KEY_CNT] == 0) + return ubcore_list_res_done(cb); + + switch (type) { + case UBCORE_RES_KEY_TPG: + tpg_val = res_buf; + vfree(tpg_val->tp_list); + break; + case UBCORE_RES_KEY_JETTY_GROUP: + jtgrp_val = res_buf; + vfree(jtgrp_val->jetty_list); + break; + case UBCORE_RES_KEY_SEG: + seg_val = res_buf; + vfree(seg_val->seg_list); + break; + default: + break; + } + kfree(res_buf); + + return 0; +} + +int ubcore_query_res_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + uint32_t type = (uint32_t)cb->args[CB_ARGS_CMD_TYPE]; + void *res_buf = (void *)cb->args[CB_ARGS_DEV_BUF]; + void *hdr; + int ret; + +
if (cb->args[CB_ARGS_NEXT_TYPE] == UBCORE_ATTR_RES_LAST) + return 0; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &ubcore_genl_family, NLM_F_MULTI, + UBCORE_CMD_QUERY_RES); + if (!hdr) + return 0; + + if (cb->args[CB_ARGS_KEY_CNT] == 0) + ret = ubcore_fill_list_res(type, res_buf, skb, cb); + else + ret = ubcore_fill_res(type, res_buf, skb, cb); + if (ret < 0) + genlmsg_cancel(skb, hdr); + else + genlmsg_end(skb, hdr); + + return (int)skb->len; +} + +static void ubcore_free_eid_ctx(struct ubcore_update_eid_ctx *ctx) +{ + if (ctx->net) + put_net(ctx->net); + if (ctx->dev) + ubcore_put_device(ctx->dev); + kfree(ctx->req_msg); + if (ctx->s) { + kfree(ctx->s->resp); + ubcore_destroy_msg_session(ctx->s); + } + kfree(ctx); + ubcore_log_info("update eid done"); +} + +static int ubcore_dump_eid_ret(struct sk_buff *skb, struct netlink_callback *cb, + enum ubcore_cmd cmd_type) +{ + struct ubcore_update_eid_ctx *ctx = + (struct ubcore_update_eid_ctx *)cb->args[0]; + void *hdr; + int ret; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &ubcore_genl_family, NLM_F_MULTI, (uint8_t)cmd_type); + if (!hdr) + return -ENOMEM; + ret = ubcore_update_uvs_eid_ret(ctx); + if (nla_put_s32(skb, UBCORE_UPDATE_EID_RET, ret)) + genlmsg_cancel(skb, hdr); + else + genlmsg_end(skb, hdr); + + return ret; +} + +int ubcore_add_eid_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + return ubcore_dump_eid_ret(skb, cb, UBCORE_CMD_ADD_EID); +} + +int ubcore_delete_eid_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + return ubcore_dump_eid_ret(skb, cb, UBCORE_CMD_DEL_EID); +} + +int ubcore_delete_eid_done(struct netlink_callback *cb) +{ + struct ubcore_update_eid_ctx *ctx = + (struct ubcore_update_eid_ctx *)cb->args[0]; + + ubcore_free_eid_ctx(ctx); + return 0; +} + +int ubcore_add_eid_done(struct netlink_callback *cb) +{ + struct ubcore_update_eid_ctx *ctx = + (struct ubcore_update_eid_ctx *)cb->args[0]; + + ubcore_free_eid_ctx(ctx); + return 0; +} + +int ubcore_delete_eid_start(struct netlink_callback *cb) +{ + return ubcore_update_ueid(cb, UBCORE_MSG_DEALLOC_EID); +} + +int ubcore_add_eid_start(struct netlink_callback *cb) +{ + return ubcore_update_ueid(cb, UBCORE_MSG_ALLOC_EID); +} diff --git a/drivers/ub/urma/ubcore/ubcore_genl_admin.h b/drivers/ub/urma/ubcore/ubcore_genl_admin.h new file mode 100644 index 000000000000..c589eda872c5 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_genl_admin.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details.
+ * + * Description: define generic netlink admin ops + * Author: Zhao Yanchao + * Create: 2024-01-18 + * Note: + * History: 2024-01-18 Zhao Yanchao + */ + +#ifndef UBCORE_GENL_ADMIN_H +#define UBCORE_GENL_ADMIN_H + +#include + +int ubcore_show_utp_ops(struct sk_buff *skb, struct genl_info *info); +int ubcore_query_stats_ops(struct sk_buff *skb, struct genl_info *info); +int ubcore_query_res_start(struct netlink_callback *cb); +int ubcore_query_res_dump(struct sk_buff *skb, struct netlink_callback *cb); +int ubcore_query_res_done(struct netlink_callback *cb); +int ubcore_set_eid_mode_ops(struct sk_buff *skb, struct genl_info *info); +int ubcore_set_ns_mode_ops(struct sk_buff *skb, struct genl_info *info); +int ubcore_get_topo_info(struct sk_buff *skb, struct genl_info *info); +int ubcore_set_dev_ns_ops(struct sk_buff *skb, struct genl_info *info); +int ubcore_add_eid_start(struct netlink_callback *cb); +int ubcore_add_eid_dump(struct sk_buff *skb, struct netlink_callback *cb); +int ubcore_add_eid_done(struct netlink_callback *cb); +int ubcore_delete_eid_start(struct netlink_callback *cb); +int ubcore_delete_eid_done(struct netlink_callback *cb); +int ubcore_delete_eid_dump(struct sk_buff *skb, struct netlink_callback *cb); + +extern struct genl_family ubcore_genl_family; + +#endif // UBCORE_GENL_ADMIN_H diff --git a/drivers/ub/urma/ubcore/ubcore_genl_define.h b/drivers/ub/urma/ubcore/ubcore_genl_define.h new file mode 100644 index 000000000000..07a0e07c34d3 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_genl_define.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore GENERIC NETLINK + * Author: Zhao yanchao + * Create: 2024-01-25 + * Note: + * History: 2024-01-25 Zhao yanchao Add base code + */ + +#ifndef UBCORE_GENERIC_NETLINK_DEFINE_H +#define UBCORE_GENERIC_NETLINK_DEFINE_H + +/* NETLINK_GENERIC related info */ +#define UBCORE_GENL_FAMILY_NAME "UBCORE_GENL" +#define UBCORE_GENL_FAMILY_VERSION 1 + +enum { + UBCORE_ATTR_UNSPEC, + UBCORE_HDR_COMMAND, + UBCORE_HDR_ARGS_LEN, + UBCORE_HDR_ARGS_ADDR, + UBCORE_ATTR_NS_MODE, + UBCORE_ATTR_DEV_NAME, + UBCORE_ATTR_NS_FD, + UBCORE_MSG_SEQ, + UBCORE_MSG_TYPE, + UBCORE_TRANSPORT_TYPE, + UBORE_SRC_ID, + UBORE_DST_ID, + UBCORE_PAYLOAD_DATA, + UBCORE_UPDATE_EID_RET, + UBCORE_ATTR_AFTER_LAST, + NUM_UBCORE_ATTR = UBCORE_ATTR_AFTER_LAST, + UBCORE_ATTR_MAX = UBCORE_ATTR_AFTER_LAST - 1 +}; + +#endif // UBCORE_GENERIC_NETLINK_DEFINE_H diff --git a/drivers/ub/urma/ubcore/ubcore_hash_table.c b/drivers/ub/urma/ubcore/ubcore_hash_table.c new file mode 100644 index 000000000000..ba90659ff6e5 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_hash_table.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved.
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: implement hash table ops + * Author: Yan Fangfang + * Create: 2022-08-03 + * Note: + * History: 2022-08-03 Yan Fangfang Add base code + */ + +#include +#include "ubcore_log.h" +#include "ubcore_hash_table.h" + +int ubcore_hash_table_alloc(struct ubcore_hash_table *ht, + const struct ubcore_ht_param *p) +{ + uint32_t i; + + if (p == NULL || p->size == 0) + return -1; + ht->p = *p; + ht->head = kcalloc(p->size, sizeof(struct hlist_head), GFP_KERNEL); + if (ht->head == NULL) + return -ENOMEM; + + for (i = 0; i < p->size; i++) + INIT_HLIST_HEAD(&ht->head[i]); + + spin_lock_init(&ht->lock); + kref_init(&ht->kref); + return 0; +} + +void ubcore_hash_table_free_with_cb(struct ubcore_hash_table *ht, + void (*free_cb)(void *)) +{ + struct hlist_node *pos = NULL, *next = NULL; + struct hlist_head *head; + uint32_t i; + void *obj; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return; + } + for (i = 0; i < ht->p.size; i++) { + hlist_for_each_safe(pos, next, &ht->head[i]) { + obj = ubcore_ht_obj(ht, pos); + hlist_del(pos); + spin_unlock(&ht->lock); + if (free_cb != NULL) + free_cb(obj); + else if (ht->p.free_f != NULL) + ht->p.free_f(obj); + else + kfree(obj); + spin_lock(&ht->lock); + } + } + head = ht->head; + ht->head = NULL; + spin_unlock(&ht->lock); + if (head != NULL) + kfree(head); +} + +void ubcore_hash_table_free(struct ubcore_hash_table *ht) +{ + ubcore_hash_table_free_with_cb(ht, NULL); +} + +void ubcore_hash_table_add_nolock(struct ubcore_hash_table *ht, + struct hlist_node *hnode, uint32_t hash) +{ + INIT_HLIST_NODE(hnode); + hlist_add_head(hnode, &ht->head[hash % ht->p.size]); +} + +void ubcore_hash_table_add(struct ubcore_hash_table *ht, + struct hlist_node *hnode, uint32_t hash) +{ + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return; + } + ubcore_hash_table_add_nolock(ht, hnode, hash); + spin_unlock(&ht->lock); +} + +void ubcore_hash_table_remove_nolock(struct ubcore_hash_table *ht, + struct hlist_node *hnode) +{ + if (ht->head == NULL) + return; + + hlist_del_init(hnode); +} + +void ubcore_hash_table_remove(struct ubcore_hash_table *ht, + struct hlist_node *hnode) +{ + spin_lock(&ht->lock); + ubcore_hash_table_remove_nolock(ht, hnode); + spin_unlock(&ht->lock); +} + +int ubcore_hash_table_check_remove(struct ubcore_hash_table *ht, + struct hlist_node *hnode) +{ + spin_lock(&ht->lock); + if (hlist_unhashed(hnode)) { + spin_unlock(&ht->lock); + return -EINVAL; + } + ubcore_hash_table_remove_nolock(ht, hnode); + spin_unlock(&ht->lock); + return 0; +} + +void *ubcore_hash_table_lookup_nolock_get(struct ubcore_hash_table *ht, + uint32_t hash, const void *key) +{ + struct hlist_node *pos = NULL; + void *obj = NULL; + + hlist_for_each(pos, &ht->head[hash % ht->p.size]) { + obj = ubcore_ht_obj(ht, pos); + if (ht->p.cmp_f != NULL && ht->p.cmp_f(obj, key) == 0) { + break; + } else if (ht->p.key_size > 0 && + memcmp(ubcore_ht_key(ht, pos), key, + ht->p.key_size) == 0) { + break; + } + obj = NULL; + } + if (ht->p.get_f != NULL && obj != NULL) + 
ht->p.get_f(obj); + + return obj; +} + +void *ubcore_hash_table_lookup_get(struct ubcore_hash_table *ht, uint32_t hash, + const void *key) +{ + void *obj = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + obj = ubcore_hash_table_lookup_nolock_get(ht, hash, key); + + spin_unlock(&ht->lock); + return obj; +} + +void *ubcore_hash_table_lookup_nolock(struct ubcore_hash_table *ht, + uint32_t hash, const void *key) +{ + struct hlist_node *pos = NULL; + void *obj = NULL; + + hlist_for_each(pos, &ht->head[hash % ht->p.size]) { + obj = ubcore_ht_obj(ht, pos); + if (ht->p.cmp_f != NULL && ht->p.cmp_f(obj, key) == 0) { + break; + } else if (ht->p.key_size > 0 && + memcmp(ubcore_ht_key(ht, pos), key, + ht->p.key_size) == 0) { + break; + } + obj = NULL; + } + return obj; +} + +void *ubcore_hash_table_lookup(struct ubcore_hash_table *ht, uint32_t hash, + const void *key) +{ + void *obj = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + obj = ubcore_hash_table_lookup_nolock(ht, hash, key); + spin_unlock(&ht->lock); + return obj; +} + +/* Do not insert a new entry if an old entry with the same key exists */ +int ubcore_hash_table_find_add(struct ubcore_hash_table *ht, + struct hlist_node *hnode, uint32_t hash) +{ + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return -EINVAL; + } + /* Old entry with the same key exists */ + if (ubcore_hash_table_lookup_nolock(ht, hash, + ubcore_ht_key(ht, hnode)) != NULL) { + spin_unlock(&ht->lock); + return -EEXIST; + } + ubcore_hash_table_add_nolock(ht, hnode, hash); + spin_unlock(&ht->lock); + return 0; +} + +void *ubcore_hash_table_find_remove(struct ubcore_hash_table *ht, uint32_t hash, + const void *key) +{ + struct hlist_node *pos = NULL, *next = NULL; + void *obj = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + hlist_for_each_safe(pos, next, &ht->head[hash % ht->p.size]) { + obj = ubcore_ht_obj(ht, pos); + if (ht->p.cmp_f != NULL && ht->p.cmp_f(obj, key) == 0) { + hlist_del(pos); + break; + } else if (ht->p.key_size > 0 && + memcmp(ubcore_ht_key(ht, pos), key, + ht->p.key_size) == 0) { + hlist_del(pos); + break; + } + obj = NULL; + } + spin_unlock(&ht->lock); + return obj; +} diff --git a/drivers/ub/urma/ubcore/ubcore_hash_table.h b/drivers/ub/urma/ubcore/ubcore_hash_table.h new file mode 100644 index 000000000000..e8a322b05fad --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_hash_table.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: define hash table ops + * Author: Yan Fangfang + * Create: 2022-08-03 + * Note: + * History: 2022-08-03 Yan Fangfang Add base code + */ + +#ifndef UBCORE_HASH_TABLE_H +#define UBCORE_HASH_TABLE_H + +#include + +static inline void *ubcore_ht_obj(const struct ubcore_hash_table *ht, + const struct hlist_node *hnode) +{ + return (char *)hnode - ht->p.node_offset; +} + +static inline void *ubcore_ht_key(const struct ubcore_hash_table *ht, + const struct hlist_node *hnode) +{ + return ((char *)hnode - ht->p.node_offset) + ht->p.key_offset; +} +/* Initialize the ht head array; does not allocate the hash table struct itself */ +int ubcore_hash_table_alloc(struct ubcore_hash_table *ht, + const struct ubcore_ht_param *p); +/* Free the ht head array; does not release the hash table struct itself */ +void ubcore_hash_table_free(struct ubcore_hash_table *ht); +void ubcore_hash_table_free_with_cb(struct ubcore_hash_table *ht, + void (*free_cb)(void *)); +void ubcore_hash_table_add(struct ubcore_hash_table *ht, + struct hlist_node *hnode, uint32_t hash); +void ubcore_hash_table_add_nolock(struct ubcore_hash_table *ht, + struct hlist_node *hnode, uint32_t hash); +void ubcore_hash_table_remove(struct ubcore_hash_table *ht, + struct hlist_node *hnode); +int ubcore_hash_table_check_remove(struct ubcore_hash_table *ht, + struct hlist_node *hnode); +void ubcore_hash_table_remove_nolock(struct ubcore_hash_table *ht, + struct hlist_node *hnode); +void *ubcore_hash_table_lookup(struct ubcore_hash_table *ht, uint32_t hash, + const void *key); +void *ubcore_hash_table_lookup_nolock(struct ubcore_hash_table *ht, + uint32_t hash, const void *key); +void *ubcore_hash_table_lookup_get(struct ubcore_hash_table *ht, uint32_t hash, + const void *key); +void *ubcore_hash_table_lookup_nolock_get(struct ubcore_hash_table *ht, + uint32_t hash, const void *key); +void *ubcore_hash_table_find_remove(struct ubcore_hash_table *ht, uint32_t hash, + const void *key); +/* Do not insert a new entry if an old entry with the same key exists */ +int ubcore_hash_table_find_add(struct ubcore_hash_table *ht, + struct hlist_node *hnode, uint32_t hash); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_jetty.c b/drivers/ub/urma/ubcore/ubcore_jetty.c new file mode 100644 index 000000000000..4258c11edd10 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_jetty.c @@ -0,0 +1,2682 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details.
+ * + * Description: ubcore jetty kernel module + * Author: Ouyang Changchun + * Create: 2021-11-25 + * Note: + * History: 2021-11-25: create file + * History: 2022-07-28: Yan Fangfang move jetty implementation here + */ + +#include +#include +#include +#include +#include +#include + +#include "ubcore_connect_adapter.h" +#include "ubcore_connect_bonding.h" +#include "ubcore_log.h" +#include +#include +#include "ubcore_priv.h" +#include "ubcore_hash_table.h" +#include "ubcore_tp.h" +#include "ubcore_tp_table.h" +#include "ubcore_vtp.h" +#include "ubcore_tpg.h" +#include "ubcore_device.h" +#include + +static void ubcore_jfs_kref_release(struct kref *ref_cnt) +{ + struct ubcore_jfs *jfs = + container_of(ref_cnt, struct ubcore_jfs, ref_cnt); + + complete(&jfs->comp); +} + +void ubcore_put_jfs(struct ubcore_jfs *jfs) +{ + if (jfs != NULL) + (void)kref_put(&jfs->ref_cnt, ubcore_jfs_kref_release); +} + +void ubcore_jfs_get(void *obj) +{ + struct ubcore_jfs *jfs = obj; + + kref_get(&jfs->ref_cnt); +} + +static void ubcore_jfr_kref_release(struct kref *ref_cnt) +{ + struct ubcore_jfr *jfr = + container_of(ref_cnt, struct ubcore_jfr, ref_cnt); + + complete(&jfr->comp); +} + +void ubcore_put_jfr(struct ubcore_jfr *jfr) +{ + if (jfr != NULL) + (void)kref_put(&jfr->ref_cnt, ubcore_jfr_kref_release); +} + +void ubcore_jfr_get(void *obj) +{ + struct ubcore_jfr *jfr = obj; + + kref_get(&jfr->ref_cnt); +} + +static void ubcore_jetty_kref_release(struct kref *ref_cnt) +{ + struct ubcore_jetty *jetty = + container_of(ref_cnt, struct ubcore_jetty, ref_cnt); + + complete(&jetty->comp); +} + +void ubcore_put_jetty(struct ubcore_jetty *jetty) +{ + if (jetty != NULL) + (void)kref_put(&jetty->ref_cnt, ubcore_jetty_kref_release); +} + +void ubcore_jetty_get(void *obj) +{ + struct ubcore_jetty *jetty = obj; + + kref_get(&jetty->ref_cnt); +} + +struct ubcore_jfs *ubcore_find_get_jfs(struct ubcore_device *dev, + uint32_t jfs_id) +{ + if (dev == NULL) { + ubcore_log_err("dev is NULL\n"); + return NULL; + } + return ubcore_hash_table_lookup_get(&dev->ht[UBCORE_HT_JFS], jfs_id, + &jfs_id); +} + +struct ubcore_jfr *ubcore_find_get_jfr(struct ubcore_device *dev, + uint32_t jfr_id) +{ + if (dev == NULL) { + ubcore_log_err("dev is NULL\n"); + return NULL; + } + return ubcore_hash_table_lookup_get(&dev->ht[UBCORE_HT_JFR], jfr_id, + &jfr_id); +} + +struct ubcore_jetty *ubcore_find_get_jetty(struct ubcore_device *dev, + uint32_t jetty_id) +{ + if (dev == NULL) { + ubcore_log_err("invalid parameter.\n"); + return NULL; + } + + return ubcore_hash_table_lookup_get(&dev->ht[UBCORE_HT_JETTY], jetty_id, + &jetty_id); +} + +struct ubcore_jfc *ubcore_find_jfc(struct ubcore_device *dev, uint32_t jfc_id) +{ + if (dev == NULL) { + ubcore_log_err("dev is NULL\n"); + return NULL; + } + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_JFC], jfc_id, + &jfc_id); +} +EXPORT_SYMBOL(ubcore_find_jfc); + +struct ubcore_jfs *ubcore_find_jfs(struct ubcore_device *dev, uint32_t jfs_id) +{ + if (dev == NULL) { + ubcore_log_err("dev is NULL\n"); + return NULL; + } + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_JFS], jfs_id, + &jfs_id); +} +EXPORT_SYMBOL(ubcore_find_jfs); + +struct ubcore_jfr *ubcore_find_jfr(struct ubcore_device *dev, uint32_t jfr_id) +{ + if (dev == NULL) { + ubcore_log_err("dev is NULL\n"); + return NULL; + } + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_JFR], jfr_id, + &jfr_id); +} +EXPORT_SYMBOL(ubcore_find_jfr); + +static int check_and_fill_jfc_attr(struct ubcore_jfc_cfg *cfg, + struct ubcore_jfc_cfg *user) +{ + if 
(cfg->depth < user->depth) + return -1; + + /* store the immutable and skip the driver updated depth */ + cfg->flag = user->flag; + cfg->jfc_context = user->jfc_context; + return 0; +} + +struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, + struct ubcore_jfc_cfg *cfg, + ubcore_comp_callback_t jfce_handler, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata) +{ + struct ubcore_jfc *jfc; + int ret; + + if (dev == NULL || cfg == NULL || dev->ops == NULL || + dev->ops->create_jfc == NULL || dev->ops->destroy_jfc == NULL) + return ERR_PTR(-EINVAL); + + jfc = dev->ops->create_jfc(dev, cfg, udata); + if (IS_ERR_OR_NULL(jfc)) { + ubcore_log_err("failed to create jfc.\n"); + return UBCORE_CHECK_RETURN_ERR_PTR(jfc, ENOSPC); + } + + if (check_and_fill_jfc_attr(&jfc->jfc_cfg, cfg) != 0) { + (void)dev->ops->destroy_jfc(jfc); + ubcore_log_err("jfc cfg is not qualified.\n"); + return ERR_PTR(-EINVAL); + } + jfc->jfce_handler = jfce_handler; + jfc->jfae_handler = jfae_handler; + jfc->ub_dev = dev; + jfc->uctx = ubcore_get_uctx(udata); + atomic_set(&jfc->use_cnt, 0); + + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JFC], &jfc->hnode, + jfc->id); + if (ret != 0) { + (void)dev->ops->destroy_jfc(jfc); + ubcore_log_err("Failed to add jfc.\n"); + return ERR_PTR(ret); + } + return jfc; +} +EXPORT_SYMBOL(ubcore_create_jfc); + +int ubcore_modify_jfc(struct ubcore_jfc *jfc, struct ubcore_jfc_attr *attr, + struct ubcore_udata *udata) +{ + struct ubcore_device *dev; + uint32_t jfc_id; + int ret; + + if (jfc == NULL || attr == NULL || jfc->ub_dev == NULL || + jfc->ub_dev->ops == NULL || jfc->ub_dev->ops->modify_jfc == NULL) + return -EINVAL; + + jfc_id = jfc->id; + dev = jfc->ub_dev; + + ret = dev->ops->modify_jfc(jfc, attr, udata); + if (ret != 0) + ubcore_log_err("UBEP failed to modify jfc, jfc_id:%u.\n", + jfc_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_modify_jfc); + +int ubcore_delete_jfc(struct ubcore_jfc *jfc) +{ + struct ubcore_device *dev; + uint32_t jfc_id; + int ret; + + if (jfc == NULL || jfc->ub_dev == NULL || jfc->ub_dev->ops == NULL || + jfc->ub_dev->ops->destroy_jfc == NULL) + return -1; + + if (atomic_read(&jfc->use_cnt)) { + ubcore_log_err("The jfc is still being used, use_cnt is %d", + atomic_read(&jfc->use_cnt)); + return -EBUSY; + } + + jfc_id = jfc->id; + dev = jfc->ub_dev; + ubcore_hash_table_remove(&dev->ht[UBCORE_HT_JFC], &jfc->hnode); + ret = dev->ops->destroy_jfc(jfc); + if (ret != 0) + ubcore_log_err("UBEP failed to destroy jfc, jfc_id:%u.\n", + jfc_id); + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jfc); + +int ubcore_delete_jfc_batch(struct ubcore_jfc **jfc_arr, int jfc_num, + int *bad_jfc_index) +{ + struct ubcore_device *dev = NULL; + struct ubcore_jfc *jfc = NULL; + uint32_t jfc_id; + uint32_t i; + int ret; + + if (jfc_arr == NULL || jfc_num <= 0 || bad_jfc_index == NULL) { + ubcore_log_err("Invalid parameter."); + return -EINVAL; + } + + for (i = 0; i < jfc_num; ++i) { + jfc = jfc_arr[i]; + if (jfc == NULL || jfc->ub_dev == NULL || + jfc->ub_dev->ops == NULL || + jfc->ub_dev->ops->destroy_jfc_batch == NULL) { + *bad_jfc_index = 0; + ubcore_log_err("Invalid parameter, index is %d", i); + return -EINVAL; + } + + if (atomic_read(&jfc->use_cnt)) { + ubcore_log_err( + "The jfc is still being used, index is %u", i); + ubcore_log_debug("jfc->use_cnt is %d", + atomic_read(&jfc->use_cnt)); + *bad_jfc_index = 0; + return -EBUSY; + } + } + + for (i = 0; i < jfc_num; ++i) { + jfc = jfc_arr[i]; + jfc_id = jfc->id; + dev = jfc->ub_dev; + 
ubcore_hash_table_remove(&dev->ht[UBCORE_HT_JFC], &jfc->hnode); + } + + ret = dev->ops->destroy_jfc_batch(jfc_arr, jfc_num, bad_jfc_index); + if (ret != 0) + ubcore_log_err( + "driver failed to destroy jfc batch, index: %d.\n", + *bad_jfc_index); + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jfc_batch); + +static int check_jfs_cfg(struct ubcore_device *dev, struct ubcore_jfs_cfg *cfg) +{ + if (ubcore_check_trans_mode_valid(cfg->trans_mode) != true) { + ubcore_log_err("Invalid parameter, trans_mode: %d.\n", + (int)cfg->trans_mode); + return -EINVAL; + } + + if (cfg->depth == 0 || cfg->depth > dev->attr.dev_cap.max_jfs_depth) { + ubcore_log_err("Invalid parameter, depth:%u, max_depth:%u.\n", + cfg->depth, dev->attr.dev_cap.max_jfs_depth); + return -EINVAL; + } + if (cfg->max_inline_data != 0 && + cfg->max_inline_data > dev->attr.dev_cap.max_jfs_inline_size) { + ubcore_log_err( + "Invalid parameter, inline_data:%u, max_inline_len:%u.\n", + cfg->max_inline_data, + dev->attr.dev_cap.max_jfs_inline_size); + return -EINVAL; + } + if (cfg->max_sge > dev->attr.dev_cap.max_jfs_sge) { + ubcore_log_err("Invalid parameter, sge:%u, max_sge:%u.\n", + cfg->max_sge, dev->attr.dev_cap.max_jfs_sge); + return -EINVAL; + } + if (cfg->max_rsge > dev->attr.dev_cap.max_jfs_rsge) { + ubcore_log_err("Invalid parameter, rsge:%u, max_rsge:%u.\n", + cfg->max_rsge, dev->attr.dev_cap.max_jfs_rsge); + return -EINVAL; + } + return 0; +} + +static int check_and_fill_jfs_attr(struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_cfg *user) +{ + if (cfg->depth < user->depth || cfg->max_sge < user->max_sge || + cfg->max_rsge < user->max_rsge || + cfg->max_inline_data < user->max_inline_data) + return -1; + + /* store the immutable and skip the driver updated attributes including depth, + * max_sge and max_inline_data + */ + cfg->flag = user->flag; + cfg->eid_index = user->eid_index; + cfg->priority = user->priority; + cfg->rnr_retry = user->rnr_retry; + cfg->err_timeout = user->err_timeout; + cfg->trans_mode = user->trans_mode; + cfg->jfs_context = user->jfs_context; + cfg->jfc = user->jfc; + return 0; +} + +struct ubcore_jfs *ubcore_create_jfs(struct ubcore_device *dev, + struct ubcore_jfs_cfg *cfg, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata) +{ + struct ubcore_jfs *jfs; + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->create_jfs == NULL || + dev->ops->destroy_jfs == NULL || cfg == NULL || cfg->jfc == NULL || + !ubcore_eid_valid(dev, cfg->eid_index, udata)) + return ERR_PTR(-EINVAL); + + if (((uint16_t)cfg->trans_mode & dev->attr.dev_cap.trans_mode) == 0) { + ubcore_log_err("jfs cfg is not supported.\n"); + return ERR_PTR(-EINVAL); + } + if (check_jfs_cfg(dev, cfg) != 0) + return ERR_PTR(-EINVAL); + + jfs = dev->ops->create_jfs(dev, cfg, udata); + if (IS_ERR_OR_NULL(jfs)) { + ubcore_log_err("failed to create jfs.\n"); + return UBCORE_CHECK_RETURN_ERR_PTR(jfs, ENOSPC); + } + + /* Prevent ubcore private data from being modified */ + if (check_and_fill_jfs_attr(&jfs->jfs_cfg, cfg) != 0) { + (void)dev->ops->destroy_jfs(jfs); + ubcore_log_err("jfs cfg is not qualified.\n"); + return ERR_PTR(-EINVAL); + } + jfs->ub_dev = dev; + jfs->uctx = ubcore_get_uctx(udata); + jfs->jfae_handler = jfae_handler; + jfs->jfs_id.eid = dev->eid_table.eid_entries[cfg->eid_index].eid; + atomic_set(&jfs->use_cnt, 0); + kref_init(&jfs->ref_cnt); + init_completion(&jfs->comp); + + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JFS], &jfs->hnode, + jfs->jfs_id.id); + if (ret != 0) { + 
ubcore_destroy_tptable(&jfs->tptable); + (void)dev->ops->destroy_jfs(jfs); + ubcore_log_err("Failed to add jfs.\n"); + return ERR_PTR(ret); + } + + atomic_inc(&cfg->jfc->use_cnt); + return jfs; +} +EXPORT_SYMBOL(ubcore_create_jfs); + +int ubcore_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, + struct ubcore_udata *udata) +{ + struct ubcore_device *dev; + uint32_t jfs_id; + int ret; + + if (jfs == NULL || attr == NULL || jfs->ub_dev == NULL || + jfs->ub_dev->ops == NULL || jfs->ub_dev->ops->modify_jfs == NULL) + return -EINVAL; + + jfs_id = jfs->jfs_id.id; + dev = jfs->ub_dev; + ret = dev->ops->modify_jfs(jfs, attr, udata); + if (ret != 0) + ubcore_log_err("UBEP failed to modify jfs, jfs_id:%u.\n", + jfs_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_modify_jfs); + +int ubcore_query_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_attr *attr) +{ + struct ubcore_device *dev; + uint32_t jfs_id; + int ret; + + if (jfs == NULL || cfg == NULL || attr == NULL || jfs->ub_dev == NULL || + jfs->ub_dev->ops == NULL || jfs->ub_dev->ops->query_jfs == NULL) + return -EINVAL; + + jfs_id = jfs->jfs_id.id; + dev = jfs->ub_dev; + ret = dev->ops->query_jfs(jfs, cfg, attr); + if (ret != 0) + ubcore_log_err("UBEP failed to query jfs, jfs_id:%u.\n", + jfs_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_query_jfs); + +int ubcore_delete_jfs(struct ubcore_jfs *jfs) +{ + struct ubcore_device *dev; + struct ubcore_jfc *jfc; + uint32_t jfs_id; + int ret; + + if (jfs == NULL || jfs->ub_dev == NULL || jfs->ub_dev->ops == NULL || + jfs->ub_dev->ops->destroy_jfs == NULL) + return -EINVAL; + + jfc = jfs->jfs_cfg.jfc; + jfs_id = jfs->jfs_id.id; + dev = jfs->ub_dev; + + (void)ubcore_hash_table_check_remove(&dev->ht[UBCORE_HT_JFS], + &jfs->hnode); + ubcore_destroy_tptable(&jfs->tptable); + + ubcore_put_jfs(jfs); + wait_for_completion(&jfs->comp); + + ret = dev->ops->destroy_jfs(jfs); + if (ret != 0) { + ubcore_log_err("UBEP failed to destroy jfs, jfs_id:%u.\n", + jfs_id); + kref_init(&jfs->ref_cnt); + return ret; + } + + atomic_dec(&jfc->use_cnt); + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jfs); + +int ubcore_delete_jfs_batch(struct ubcore_jfs **jfs_arr, int jfs_num, + int *bad_jfs_index) +{ + struct ubcore_device *dev = NULL; + struct ubcore_jfc **jfc = NULL; + struct ubcore_jfs *jfs = NULL; + int bad_index = 0; + uint32_t jfs_id; + uint32_t i; + int ret; + + if (jfs_arr == NULL || jfs_num <= 0 || bad_jfs_index == NULL) { + ubcore_log_err("Invalid parameter."); + return -EINVAL; + } + + for (i = 0; i < jfs_num; ++i) { + jfs = jfs_arr[i]; + if (jfs == NULL || jfs->ub_dev == NULL || + jfs->ub_dev->ops == NULL || + jfs->ub_dev->ops->destroy_jfs_batch == NULL) { + *bad_jfs_index = 0; + ubcore_log_err("Invalid parameter, index is %d", i); + return -EINVAL; + } + } + + jfc = kcalloc(jfs_num, sizeof(struct ubcore_jfc *), GFP_KERNEL); + if (jfc == NULL) { + *bad_jfs_index = 0; + return -ENOMEM; + } + + for (i = 0; i < jfs_num; ++i) { + jfs = jfs_arr[i]; + jfc[i] = jfs->jfs_cfg.jfc; + jfs_id = jfs->jfs_id.id; + dev = jfs->ub_dev; + (void)ubcore_hash_table_check_remove(&dev->ht[UBCORE_HT_JFS], + &jfs->hnode); + ubcore_destroy_tptable(&jfs->tptable); + + ubcore_put_jfs(jfs); + wait_for_completion(&jfs->comp); + } + + ret = dev->ops->destroy_jfs_batch(jfs_arr, jfs_num, bad_jfs_index); + bad_index = jfs_num; + if (ret != 0) { + ubcore_log_err( + "driver failed to destroy jfs batch, index: %d.\n", + *bad_jfs_index); + if (ret == -EINVAL) + bad_index = 0; + else + bad_index = *bad_jfs_index; + 
if (bad_index >= jfs_num) { + ubcore_log_err( + "driver return bad_jfs_index %d out of range, jfs_num is %d.\n", + *bad_jfs_index, jfs_num); + *bad_jfs_index = 0; + bad_index = jfs_num; + ret = -EFAULT; + } + for (i = bad_index; i < jfs_num; ++i) + kref_init(&jfs_arr[i]->ref_cnt); + } + + for (i = 0; i < bad_index; ++i) { + atomic_dec(&jfc[i]->use_cnt); + ubcore_log_info("jfc->use_cnt is: %d.\n", + atomic_read(&jfc[i]->use_cnt)); + } + + kfree(jfc); + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jfs_batch); + +int ubcore_flush_jfs(struct ubcore_jfs *jfs, int cr_cnt, struct ubcore_cr *cr) +{ + struct ubcore_ops *dev_ops; + + if (jfs == NULL || jfs->ub_dev == NULL || jfs->ub_dev->ops == NULL || + jfs->ub_dev->ops->flush_jfs == NULL || cr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfs->ub_dev->ops; + return dev_ops->flush_jfs(jfs, cr_cnt, cr); +} +EXPORT_SYMBOL(ubcore_flush_jfs); + +static int check_and_fill_jfr_attr(struct ubcore_jfr_cfg *cfg, + struct ubcore_jfr_cfg *user) +{ + if (cfg->depth < user->depth || cfg->max_sge < user->max_sge) + return -1; + + /* store the immutable and skip the driver updated attributes including depth, max_sge */ + cfg->eid_index = user->eid_index; + cfg->flag = user->flag; + cfg->min_rnr_timer = user->min_rnr_timer; + cfg->trans_mode = user->trans_mode; + cfg->token_value = user->token_value; + cfg->jfr_context = user->jfr_context; + cfg->jfc = user->jfc; + return 0; +} + +static int ubcore_check_jfr_cfg(struct ubcore_jfr_cfg *cfg) +{ + if (ubcore_check_trans_mode_valid(cfg->trans_mode) != true) { + ubcore_log_err("Invalid parameter, trans_mode: %d.\n", + (int)cfg->trans_mode); + return -1; + } + + return 0; +} + +struct ubcore_jfr *ubcore_create_jfr(struct ubcore_device *dev, + struct ubcore_jfr_cfg *cfg, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata) +{ + struct ubcore_jfr *jfr; + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->create_jfr == NULL || + dev->ops->destroy_jfr == NULL || cfg == NULL || cfg->jfc == NULL || + !ubcore_eid_valid(dev, cfg->eid_index, udata)) + return ERR_PTR(-EINVAL); + + if (ubcore_check_jfr_cfg(cfg) != 0) + return ERR_PTR(-EINVAL); + + jfr = dev->ops->create_jfr(dev, cfg, udata); + if (IS_ERR_OR_NULL(jfr)) { + ubcore_log_err("failed to create jfr.\n"); + return UBCORE_CHECK_RETURN_ERR_PTR(jfr, ENOSPC); + } + + if (check_and_fill_jfr_attr(&jfr->jfr_cfg, cfg) != 0) { + ubcore_log_err("jfr cfg is not qualified.\n"); + (void)dev->ops->destroy_jfr(jfr); + return ERR_PTR(-EINVAL); + } + jfr->ub_dev = dev; + jfr->uctx = ubcore_get_uctx(udata); + jfr->jfae_handler = jfae_handler; + jfr->jfr_id.eid = dev->eid_table.eid_entries[cfg->eid_index].eid; + atomic_set(&jfr->use_cnt, 0); + kref_init(&jfr->ref_cnt); + init_completion(&jfr->comp); + + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JFR], &jfr->hnode, + jfr->jfr_id.id); + if (ret != 0) { + ubcore_destroy_tptable(&jfr->tptable); + (void)dev->ops->destroy_jfr(jfr); + ubcore_log_err("Failed to add jfr.\n"); + return ERR_PTR(ret); + } + + atomic_inc(&cfg->jfc->use_cnt); + return jfr; +} +EXPORT_SYMBOL(ubcore_create_jfr); + +int ubcore_modify_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, + struct ubcore_udata *udata) +{ + struct ubcore_device *dev; + uint32_t jfr_id; + int ret; + + if (jfr == NULL || attr == NULL || jfr->ub_dev == NULL || + jfr->ub_dev->ops == NULL || jfr->ub_dev->ops->modify_jfr == NULL) + return -EINVAL; + + jfr_id = jfr->jfr_id.id; + dev = jfr->ub_dev; + ret = 
dev->ops->modify_jfr(jfr, attr, udata); + if (ret != 0) + ubcore_log_err("UBEP failed to modify jfr, jfr_id:%u.\n", + jfr_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_modify_jfr); + +int ubcore_query_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_cfg *cfg, + struct ubcore_jfr_attr *attr) +{ + struct ubcore_device *dev; + uint32_t jfr_id; + int ret; + + if (jfr == NULL || cfg == NULL || attr == NULL || jfr->ub_dev == NULL || + jfr->ub_dev->ops == NULL || jfr->ub_dev->ops->query_jfr == NULL) + return -EINVAL; + + jfr_id = jfr->jfr_id.id; + dev = jfr->ub_dev; + ret = dev->ops->query_jfr(jfr, cfg, attr); + if (ret != 0) + ubcore_log_err("UBEP failed to query jfr, jfr_id:%u.\n", + jfr_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_query_jfr); + +int ubcore_delete_jfr(struct ubcore_jfr *jfr) +{ + struct ubcore_device *dev; + struct ubcore_jfc *jfc; + uint32_t jfr_id; + int ret; + + if (jfr == NULL || jfr->ub_dev == NULL || jfr->ub_dev->ops == NULL || + jfr->ub_dev->ops->destroy_jfr == NULL) + return -EINVAL; + + if (atomic_read(&jfr->use_cnt)) { + ubcore_log_err("The jfr is still being used"); + return -EBUSY; + } + + jfc = jfr->jfr_cfg.jfc; + jfr_id = jfr->jfr_id.id; + dev = jfr->ub_dev; + + (void)ubcore_hash_table_check_remove(&dev->ht[UBCORE_HT_JFR], + &jfr->hnode); + ubcore_destroy_tptable(&jfr->tptable); + + ubcore_put_jfr(jfr); + wait_for_completion(&jfr->comp); + + ret = dev->ops->destroy_jfr(jfr); + if (ret != 0) { + ubcore_log_err( + "UBEP failed to destroy jfr, jfr_id:%u. ret:%u\n", + jfr_id, ret); + kref_init(&jfr->ref_cnt); + return ret; + } + + atomic_dec(&jfc->use_cnt); + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jfr); + +int ubcore_delete_jfr_batch(struct ubcore_jfr **jfr_arr, int jfr_num, + int *bad_jfr_index) +{ + struct ubcore_device *dev = NULL; + struct ubcore_jfc **jfc = NULL; + struct ubcore_jfr *jfr = NULL; + int bad_index = 0; + uint32_t jfr_id; + uint32_t i; + int ret; + + if (jfr_arr == NULL || jfr_num <= 0 || bad_jfr_index == NULL) { + ubcore_log_err("Invalid parameter."); + return -EINVAL; + } + + jfc = kcalloc(jfr_num, sizeof(struct ubcore_jfc *), GFP_KERNEL); + if (jfc == NULL) { + *bad_jfr_index = 0; + return -ENOMEM; + } + + for (i = 0; i < jfr_num; ++i) { + jfr = jfr_arr[i]; + jfc[i] = jfr->jfr_cfg.jfc; + if (jfr == NULL || jfr->ub_dev == NULL || + jfr->ub_dev->ops == NULL || + jfr->ub_dev->ops->destroy_jfr_batch == NULL) { + *bad_jfr_index = 0; + ubcore_log_err("Invalid parameter, index is %d", i); + return -EINVAL; + } + + if (atomic_read(&jfr->use_cnt)) { + ubcore_log_err( + "The jfr is still being used, index is %u", i); + ubcore_log_debug("jfr->use_cnt is %d", + atomic_read(&jfr->use_cnt)); + *bad_jfr_index = 0; + return -EBUSY; + } + } + + for (i = 0; i < jfr_num; ++i) { + jfr = jfr_arr[i]; + jfr_id = jfr->jfr_id.id; + dev = jfr->ub_dev; + (void)ubcore_hash_table_check_remove(&dev->ht[UBCORE_HT_JFR], + &jfr->hnode); + ubcore_destroy_tptable(&jfr->tptable); + + ubcore_put_jfr(jfr); + wait_for_completion(&jfr->comp); + } + + ret = dev->ops->destroy_jfr_batch(jfr_arr, jfr_num, bad_jfr_index); + bad_index = jfr_num; + if (ret != 0) { + ubcore_log_err( + "driver failed to destroy jfr batch, index: %d.\n", + *bad_jfr_index); + if (ret == -EINVAL) + bad_index = 0; + else + bad_index = *bad_jfr_index; + if (bad_index >= jfr_num) { + ubcore_log_err( + "driver return bad_jfr_index %d out of range, jfr_num is %d.\n", + *bad_jfr_index, jfr_num); + *bad_jfr_index = 0; + bad_index = jfr_num; + ret = -EFAULT; + } + for (i = bad_index; i < jfr_num; ++i) + 
kref_init(&jfr_arr[i]->ref_cnt); + } + + for (i = 0; i < bad_index; ++i) { + atomic_dec(&jfc[i]->use_cnt); + ubcore_log_info("jfc->use_cnt is: %d.\n", + atomic_read(&jfc[i]->use_cnt)); + } + + kfree(jfc); + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jfr_batch); + +struct ubcore_tjetty *ubcore_import_jfr(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_vtp_param vtp_param = { 0 }; + struct ubcore_vtpn *vtpn = NULL; + struct ubcore_tjetty *tjfr; + + if (!ubcore_have_ops(dev) || dev->ops->unimport_jfr == NULL || + cfg == NULL || dev->attr.dev_cap.max_eid_cnt <= cfg->eid_index) + return ERR_PTR(-EINVAL); + + if (ubcore_check_ctrlplane_compat(dev->ops->import_jfr)) + return ubcore_import_jfr_compat(dev, cfg, udata); + + if (ubcore_is_bonding_dev(dev)) { + if (ubcore_connect_exchange_udata_when_import_jetty( + cfg, udata, true) != 0) { + ubcore_log_err( + "Failed to exchange udata when import jfr\n"); + return ERR_PTR(-ENOEXEC); + } + } + + tjfr = dev->ops->import_jfr(dev, cfg, udata); + if (IS_ERR_OR_NULL(tjfr)) { + ubcore_log_err("UBEP failed to import jfr, jfr_id:%u.\n", + cfg->id.id); + if (tjfr == NULL) + return ERR_PTR(-ENOEXEC); + return tjfr; + } + tjfr->cfg = *cfg; + tjfr->ub_dev = dev; + tjfr->uctx = ubcore_get_uctx(udata); + atomic_set(&tjfr->use_cnt, 0); + mutex_init(&tjfr->lock); + + /* create rm tp if the remote eid is not connected */ + if (!ubcore_is_bonding_dev(dev) && + dev->transport_type == UBCORE_TRANSPORT_UB && + (cfg->trans_mode == UBCORE_TP_RM || + cfg->trans_mode == UBCORE_TP_UM)) { + ubcore_set_vtp_param(dev, NULL, cfg, &vtp_param); + mutex_lock(&tjfr->lock); + vtpn = ubcore_connect_vtp(dev, &vtp_param); + if (IS_ERR_OR_NULL(vtpn)) { + mutex_unlock(&tjfr->lock); + mutex_destroy(&tjfr->lock); + (void)dev->ops->unimport_jfr(tjfr); + ubcore_log_err("Failed to setup tp connection.\n"); + if (vtpn == NULL) + return ERR_PTR(-ECONNREFUSED); + return (void *)vtpn; + } + tjfr->vtpn = vtpn; + mutex_unlock(&tjfr->lock); + } else { + tjfr->vtpn = NULL; + } + tjfr->tp = NULL; + return tjfr; +} +EXPORT_SYMBOL(ubcore_import_jfr); + +struct ubcore_tjetty * +ubcore_import_jfr_ex(struct ubcore_device *dev, struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata) +{ + struct ubcore_vtp_param vtp_param = { 0 }; + struct ubcore_vtpn *vtpn = NULL; + struct ubcore_tjetty *tjfr; + + if (dev == NULL || dev->ops == NULL || + dev->ops->import_jfr_ex == NULL || dev->ops->unimport_jfr == NULL || + cfg == NULL || active_tp_cfg == NULL || + dev->attr.dev_cap.max_eid_cnt <= cfg->eid_index) + return ERR_PTR(-EINVAL); + + tjfr = dev->ops->import_jfr_ex(dev, cfg, active_tp_cfg, udata); + if (IS_ERR_OR_NULL(tjfr)) { + ubcore_log_err("UBEP failed to import jfr, jfr_id:%u.\n", + cfg->id.id); + if (tjfr == NULL) + return ERR_PTR(-ENOEXEC); + return tjfr; + } + tjfr->cfg = *cfg; + tjfr->ub_dev = dev; + tjfr->uctx = ubcore_get_uctx(udata); + atomic_set(&tjfr->use_cnt, 0); + mutex_init(&tjfr->lock); + + /* create rm tp if the remote eid is not connected */ + if (dev->transport_type == UBCORE_TRANSPORT_UB && + (cfg->trans_mode == UBCORE_TP_RM || + cfg->trans_mode == UBCORE_TP_UM)) { + ubcore_set_vtp_param(dev, NULL, cfg, &vtp_param); + mutex_lock(&tjfr->lock); + vtpn = ubcore_connect_vtp_ctrlplane(dev, &vtp_param, + active_tp_cfg, udata); + if (IS_ERR_OR_NULL(vtpn)) { + mutex_unlock(&tjfr->lock); + mutex_destroy(&tjfr->lock); + (void)dev->ops->unimport_jfr(tjfr); + ubcore_log_err("Failed to setup tp 
connection.\n"); + if (vtpn == NULL) + return ERR_PTR(-ECONNREFUSED); + return (void *)vtpn; + } + tjfr->vtpn = vtpn; + mutex_unlock(&tjfr->lock); + } else { + tjfr->vtpn = NULL; + } + tjfr->tp = NULL; + return tjfr; +} +EXPORT_SYMBOL(ubcore_import_jfr_ex); + +int ubcore_unimport_jfr(struct ubcore_tjetty *tjfr) +{ + struct ubcore_device *dev; + int ret; + + if (tjfr == NULL || tjfr->ub_dev == NULL || tjfr->ub_dev->ops == NULL || + tjfr->ub_dev->ops->unimport_jfr == NULL || + !ubcore_have_ops(tjfr->ub_dev)) + return -EINVAL; + + dev = tjfr->ub_dev; + if (!ubcore_is_bonding_dev(dev) && + dev->transport_type == UBCORE_TRANSPORT_UB && + (tjfr->cfg.trans_mode == UBCORE_TP_RM || + tjfr->cfg.trans_mode == UBCORE_TP_UM) && + tjfr->vtpn != NULL) { + mutex_lock(&tjfr->lock); + ret = ubcore_disconnect_vtp(tjfr->vtpn); + if (ret != 0) { + ubcore_log_err("Failed to disconnect vtp.\n"); + mutex_unlock(&tjfr->lock); + return ret; + } + tjfr->vtpn = NULL; + mutex_unlock(&tjfr->lock); + } + mutex_destroy(&tjfr->lock); + return dev->ops->unimport_jfr(tjfr); +} +EXPORT_SYMBOL(ubcore_unimport_jfr); + +static int check_and_fill_jetty_attr(struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_cfg *user) +{ + if (cfg->jfs_depth < user->jfs_depth || + cfg->max_send_sge < user->max_send_sge || + cfg->max_send_rsge < user->max_send_rsge || + cfg->max_inline_data < user->max_inline_data) { + ubcore_log_err("send attributes are not qualified.\n"); + return -1; + } + if (cfg->jfr_depth < user->jfr_depth || + cfg->max_recv_sge < user->max_recv_sge) { + ubcore_log_err("recv attributes are not qualified.\n"); + return -1; + } + /* store the immutable and skip the driver updated send and recv attributes */ + cfg->eid_index = user->eid_index; + cfg->flag = user->flag; + cfg->send_jfc = user->send_jfc; + cfg->recv_jfc = user->recv_jfc; + cfg->jfr = user->jfr; + cfg->priority = user->priority; + cfg->rnr_retry = user->rnr_retry; + cfg->err_timeout = user->err_timeout; + cfg->min_rnr_timer = user->min_rnr_timer; + cfg->trans_mode = user->trans_mode; + cfg->jetty_context = user->jetty_context; + cfg->token_value = user->token_value; + return 0; +} + +static int check_jetty_cfg(struct ubcore_device *dev, + struct ubcore_jetty_cfg *cfg) +{ + if (ubcore_check_trans_mode_valid(cfg->trans_mode) != true) { + ubcore_log_err("Invalid parameter, trans_mode: %d.\n", + (int)cfg->trans_mode); + return -1; + } + + if (cfg->send_jfc == NULL || cfg->recv_jfc == NULL) { + ubcore_log_err("jfc is null.\n"); + return -1; + } + + if (cfg->flag.bs.share_jfr == 0 && + dev->transport_type == UBCORE_TRANSPORT_UB) { + ubcore_log_err("UB dev should use share jfr"); + return -1; + } + if (cfg->flag.bs.share_jfr != 0 && + (cfg->jfr == NULL || + cfg->jfr->jfr_cfg.trans_mode != cfg->trans_mode || + cfg->jfr->jfr_cfg.flag.bs.order_type != cfg->flag.bs.order_type)) { + ubcore_log_err( + "jfr is null or trans_mode invalid with shared jfr flag.\n"); + return -1; + } + return 0; +} + +static int check_jetty_cfg_with_jetty_grp(struct ubcore_jetty_cfg *cfg) +{ + if (cfg->jetty_grp == NULL) + return 0; + + if (cfg->trans_mode != UBCORE_TP_RM) + return -1; + if (cfg->token_value.token != + cfg->jetty_grp->jetty_grp_cfg.token_value.token) + return -1; + + if (cfg->flag.bs.share_jfr == 1 && + (cfg->jfr == NULL || + cfg->token_value.token != cfg->jfr->jfr_cfg.token_value.token || + cfg->jetty_grp->jetty_grp_cfg.flag.bs.token_policy != + cfg->jfr->jfr_cfg.flag.bs.token_policy || + cfg->jfr->jfr_cfg.trans_mode != UBCORE_TP_RM)) + return -1; + + return 0; +} + +static int 
check_jetty_check_dev_cap(struct ubcore_device *dev, + struct ubcore_jetty_cfg *cfg) +{ + struct ubcore_device_cap *cap = &dev->attr.dev_cap; + + if (cfg->jetty_grp != NULL) { + mutex_lock(&cfg->jetty_grp->lock); + if (cfg->jetty_grp->jetty_cnt >= cap->max_jetty_in_jetty_grp) { + mutex_unlock(&cfg->jetty_grp->lock); + ubcore_log_err( + "jetty_grp jetty cnt:%u, max_jetty in grp:%u.\n", + cfg->jetty_grp->jetty_cnt, + cap->max_jetty_in_jetty_grp); + return -1; + } + mutex_unlock(&cfg->jetty_grp->lock); + } + + if (cfg->jfs_depth == 0 || cfg->jfs_depth > cap->max_jfs_depth) { + ubcore_log_err( + "Invalid parameter, jfs_depth:%u, max_jfs_depth: %u.\n", + cfg->jfs_depth, cap->max_jfs_depth); + return -EINVAL; + } + if (cfg->max_inline_data != 0 && + cfg->max_inline_data > cap->max_jfs_inline_size) { + ubcore_log_err( + "Invalid parameter, inline_data:%u, max_jfs_inline_len: %u.\n", + cfg->max_inline_data, cap->max_jfs_inline_size); + return -EINVAL; + } + if (cfg->max_send_sge > cap->max_jfs_sge) { + ubcore_log_err( + "Invalid parameter, jfs_sge:%u, max_jfs_sge:%u.\n", + cfg->max_send_sge, cap->max_jfs_sge); + return -EINVAL; + } + if (cfg->max_send_rsge > cap->max_jfs_rsge) { + ubcore_log_err( + "Invalid parameter, jfs_rsge:%u, max_jfs_rsge:%u.\n", + cfg->max_send_rsge, cap->max_jfs_rsge); + return -EINVAL; + } + + if (cfg->flag.bs.share_jfr == 0) { + if (cfg->jfr_depth == 0 || + cfg->jfr_depth > cap->max_jfr_depth) { + ubcore_log_err( + "Invalid parameter, jfr_depth:%u, max_jfr_depth: %u.\n", + cfg->jfr_depth, cap->max_jfr_depth); + return -EINVAL; + } + if (cfg->max_recv_sge > cap->max_jfr_sge) { + ubcore_log_err( + "Invalid parameter, jfr_sge:%u, max_jfr_sge:%u.\n", + cfg->max_recv_sge, cap->max_jfr_sge); + return -EINVAL; + } + } + + return 0; +} + +static int ubcore_add_jetty_to_jetty_grp(struct ubcore_jetty *jetty, + struct ubcore_jetty_group *jetty_grp) +{ + uint32_t max_jetty_in_grp; + uint32_t i; + + max_jetty_in_grp = jetty->ub_dev->attr.dev_cap.max_jetty_in_jetty_grp; + mutex_lock(&jetty_grp->lock); + for (i = 0; i < max_jetty_in_grp; i++) { + if (jetty_grp->jetty[i] == NULL) { + jetty_grp->jetty[i] = jetty; + jetty_grp->jetty_cnt++; + mutex_unlock(&jetty_grp->lock); + return 0; + } + } + mutex_unlock(&jetty_grp->lock); + ubcore_log_err("failed to add jetty to jetty_grp.\n"); + return -1; +} + +static int +ubcore_remove_jetty_from_jetty_grp(struct ubcore_jetty *jetty, + struct ubcore_jetty_group *jetty_grp) +{ + uint32_t max_jetty_in_grp; + uint32_t i; + + if (jetty == NULL || jetty_grp == NULL) + return 0; + + max_jetty_in_grp = jetty->ub_dev->attr.dev_cap.max_jetty_in_jetty_grp; + mutex_lock(&jetty_grp->lock); + for (i = 0; i < max_jetty_in_grp; i++) { + if (jetty_grp->jetty[i] == jetty) { + jetty_grp->jetty[i] = NULL; + jetty_grp->jetty_cnt--; + mutex_unlock(&jetty_grp->lock); + return 0; + } + } + mutex_unlock(&jetty_grp->lock); + ubcore_log_err("failed to delete jetty to jetty_grp.\n"); + return -1; +} + +static int ubcore_jetty_pre_check(struct ubcore_device *dev, + struct ubcore_jetty_cfg *cfg) +{ + do { + if (check_jetty_cfg(dev, cfg) != 0) { + ubcore_log_err("failed to check jetty cfg.\n"); + break; + } + + if (check_jetty_cfg_with_jetty_grp(cfg) != 0) { + ubcore_log_err("failed to check jetty cfg.\n"); + break; + } + + if (check_jetty_check_dev_cap(dev, cfg) != 0) { + ubcore_log_err("failed to check jetty cfg.\n"); + break; + } + return 0; + } while (0); + return -EINVAL; +} + +struct ubcore_jetty *ubcore_create_jetty(struct ubcore_device *dev, + struct ubcore_jetty_cfg 
*cfg, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata) +{ + struct ubcore_jetty *jetty; + int ret; + + if (dev == NULL || cfg == NULL || dev->ops == NULL || + dev->ops->create_jetty == NULL || dev->ops->destroy_jetty == NULL || + !ubcore_eid_valid(dev, cfg->eid_index, udata)) + return ERR_PTR(-EINVAL); + + if (ubcore_jetty_pre_check(dev, cfg) != 0) + return ERR_PTR(-EINVAL); + + jetty = dev->ops->create_jetty(dev, cfg, udata); + if (IS_ERR_OR_NULL(jetty)) { + ubcore_log_err("failed to create jetty.\n"); + return UBCORE_CHECK_RETURN_ERR_PTR(jetty, ENOSPC); + } + + jetty->ub_dev = dev; + if (cfg->jetty_grp != NULL && + ubcore_add_jetty_to_jetty_grp( + jetty, (struct ubcore_jetty_group *)cfg->jetty_grp) != 0) { + ubcore_log_err("jetty cfg is not qualified.\n"); + ret = -EPERM; + goto destroy_jetty; + } + + if (check_and_fill_jetty_attr(&jetty->jetty_cfg, cfg) != 0) { + ubcore_log_err("jetty cfg is not qualified.\n"); + ret = -EINVAL; + goto delete_jetty_to_grp; + } + + jetty->uctx = ubcore_get_uctx(udata); + jetty->jfae_handler = jfae_handler; + jetty->jetty_id.eid = dev->eid_table.eid_entries[cfg->eid_index].eid; + if (jetty->jetty_cfg.trans_mode == UBCORE_TP_RC) { + jetty->tptable = ubcore_create_tptable(); + if (jetty->tptable == NULL) { + ubcore_log_err( + "Failed to create tp table in the jetty.\n"); + ret = -ENOMEM; + goto delete_jetty_to_grp; + } + } else { + jetty->tptable = + NULL; /* To prevent kernel-mode drivers, malloc is not empty */ + } + atomic_set(&jetty->use_cnt, 0); + kref_init(&jetty->ref_cnt); + init_completion(&jetty->comp); + + ret = ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_JETTY], + &jetty->hnode, jetty->jetty_id.id); + if (ret != 0) { + ubcore_log_err("Failed to add jetty.\n"); + goto destroy_tptable; + } + + atomic_inc(&cfg->send_jfc->use_cnt); + atomic_inc(&cfg->recv_jfc->use_cnt); + + if (cfg->jfr) + atomic_inc(&cfg->jfr->use_cnt); + + return jetty; +destroy_tptable: + ubcore_destroy_tptable(&jetty->tptable); +delete_jetty_to_grp: + (void)ubcore_remove_jetty_from_jetty_grp( + jetty, (struct ubcore_jetty_group *)cfg->jetty_grp); +destroy_jetty: + (void)dev->ops->destroy_jetty(jetty); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(ubcore_create_jetty); + +int ubcore_modify_jetty(struct ubcore_jetty *jetty, + struct ubcore_jetty_attr *attr, + struct ubcore_udata *udata) +{ + uint32_t jetty_id; + int ret; + + if (jetty == NULL || attr == NULL || jetty->ub_dev == NULL || + jetty->ub_dev->ops == NULL || + jetty->ub_dev->ops->modify_jetty == NULL) + return -EINVAL; + + jetty_id = jetty->jetty_id.id; + + ret = jetty->ub_dev->ops->modify_jetty(jetty, attr, udata); + if (ret != 0) + ubcore_log_err("UBEP failed to modify jetty, jetty_id:%u.\n", + jetty_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_modify_jetty); + +int ubcore_query_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_attr *attr) +{ + struct ubcore_device *dev; + uint32_t jetty_id; + int ret; + + if (jetty == NULL || cfg == NULL || attr == NULL || + jetty->ub_dev == NULL || jetty->ub_dev->ops == NULL || + jetty->ub_dev->ops->query_jetty == NULL) + return -EINVAL; + + jetty_id = jetty->jetty_id.id; + dev = jetty->ub_dev; + ret = dev->ops->query_jetty(jetty, cfg, attr); + if (ret != 0) + ubcore_log_err("UBEP failed to query jetty, jetty_id:%u.\n", + jetty_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_query_jetty); + +static int ubcore_check_jetty_attr(struct ubcore_jetty *jetty) +{ + if (jetty == NULL || jetty->ub_dev == NULL || + jetty->ub_dev->ops == NULL || 
+ jetty->ub_dev->ops->destroy_jetty == NULL) + return -1; + + if ((jetty->ub_dev->transport_type == UBCORE_TRANSPORT_UB && + jetty->jetty_cfg.trans_mode == UBCORE_TP_RC && + jetty->remote_jetty != NULL) || + atomic_read(&jetty->use_cnt) > 0) { + ubcore_log_err( + "Failed to delete jetty in RC mode because it has remote jetty"); + return -1; + } + + return 0; +} + +int ubcore_delete_jetty(struct ubcore_jetty *jetty) +{ + struct ubcore_jetty_group *jetty_grp; + struct ubcore_jfc *send_jfc; + struct ubcore_jfc *recv_jfc; + struct ubcore_device *dev; + struct ubcore_jfr *jfr; + uint32_t jetty_id; + int ret; + + if (ubcore_check_jetty_attr(jetty) != 0) + return -EINVAL; + + jetty_grp = jetty->jetty_cfg.jetty_grp; + send_jfc = jetty->jetty_cfg.send_jfc; + recv_jfc = jetty->jetty_cfg.recv_jfc; + jfr = jetty->jetty_cfg.jfr; + jetty_id = jetty->jetty_id.id; + dev = jetty->ub_dev; + + (void)ubcore_hash_table_check_remove(&dev->ht[UBCORE_HT_JETTY], + &jetty->hnode); + ubcore_destroy_tptable(&jetty->tptable); + + if (jetty->ub_dev->transport_type == UBCORE_TRANSPORT_UB && + jetty->remote_jetty != NULL) { + mutex_lock(&jetty->remote_jetty->lock); + (void)ubcore_disconnect_vtp(jetty->remote_jetty->vtpn); + jetty->remote_jetty->vtpn = NULL; + mutex_unlock(&jetty->remote_jetty->lock); + atomic_dec(&jetty->remote_jetty->use_cnt); + /* The tjetty object will release remote jetty resources */ + jetty->remote_jetty = NULL; + ubcore_log_warn( + "jetty->remote_jetty != NULL and it has been handled"); + } + + ubcore_put_jetty(jetty); + wait_for_completion(&jetty->comp); + + if (jetty_grp != NULL) { + (void)ubcore_remove_jetty_from_jetty_grp(jetty, jetty_grp); + jetty->jetty_cfg.jetty_grp = NULL; + } + ret = dev->ops->destroy_jetty(jetty); + if (ret != 0) { + ubcore_log_err("UBEP failed to destroy jetty, jetty_id:%u.\n", + jetty_id); + kref_init(&jetty->ref_cnt); + return ret; + } + + if (send_jfc) + atomic_dec(&send_jfc->use_cnt); + if (recv_jfc) + atomic_dec(&recv_jfc->use_cnt); + if (jfr) + atomic_dec(&jfr->use_cnt); + + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jetty); + +int ubcore_delete_jetty_batch(struct ubcore_jetty **jetty_arr, int jetty_num, + int *bad_jetty_index) +{ + struct ubcore_jetty_group *jetty_grp; + struct ubcore_jfc **send_jfc = NULL; + struct ubcore_jfc **recv_jfc = NULL; + struct ubcore_jetty *jetty = NULL; + struct ubcore_jfr **jfr = NULL; + struct ubcore_device *dev; + int bad_index = 0; + uint32_t jetty_id; + int ret; + int i; + + if (jetty_arr == NULL || jetty_num <= 0 || bad_jetty_index == NULL) { + ubcore_log_err("Invalid parameter."); + return -EINVAL; + } + + for (i = 0; i < jetty_num; ++i) { + jetty = jetty_arr[i]; + if (ubcore_check_jetty_attr(jetty) != 0) { + *bad_jetty_index = 0; + return -EINVAL; + } + } + send_jfc = kcalloc(jetty_num, sizeof(struct ubcore_jfc *), GFP_KERNEL); + if (send_jfc == NULL) { + *bad_jetty_index = 0; + return -ENOMEM; + } + recv_jfc = kcalloc(jetty_num, sizeof(struct ubcore_jfc *), GFP_KERNEL); + if (recv_jfc == NULL) { + kfree(send_jfc); + *bad_jetty_index = 0; + return -ENOMEM; + } + jfr = kcalloc(jetty_num, sizeof(struct ubcore_jfr *), GFP_KERNEL); + if (jfr == NULL) { + kfree(recv_jfc); + kfree(send_jfc); + *bad_jetty_index = 0; + return -ENOMEM; + } + + for (i = 0; i < jetty_num; ++i) { + jetty = jetty_arr[i]; + jetty_grp = jetty->jetty_cfg.jetty_grp; + send_jfc[i] = jetty->jetty_cfg.send_jfc; + recv_jfc[i] = jetty->jetty_cfg.recv_jfc; + jfr[i] = jetty->jetty_cfg.jfr; + jetty_id = jetty->jetty_id.id; + dev = jetty->ub_dev; + + 
(void)ubcore_hash_table_check_remove(&dev->ht[UBCORE_HT_JETTY], + &jetty->hnode); + ubcore_destroy_tptable(&jetty->tptable); + + if (jetty->ub_dev->transport_type == UBCORE_TRANSPORT_UB && + jetty->remote_jetty != NULL) { + mutex_lock(&jetty->remote_jetty->lock); + (void)ubcore_disconnect_vtp(jetty->remote_jetty->vtpn); + jetty->remote_jetty->vtpn = NULL; + mutex_unlock(&jetty->remote_jetty->lock); + atomic_dec(&jetty->remote_jetty->use_cnt); + /* The tjetty object will release remote jetty resources */ + jetty->remote_jetty = NULL; + ubcore_log_warn( + "jetty->remote_jetty != NULL and it has been handled"); + } + + ubcore_put_jetty(jetty); + wait_for_completion(&jetty->comp); + + if (jetty_grp != NULL) { + (void)ubcore_remove_jetty_from_jetty_grp(jetty, + jetty_grp); + jetty->jetty_cfg.jetty_grp = NULL; + } + } + + ret = dev->ops->destroy_jetty_batch(jetty_arr, jetty_num, + bad_jetty_index); + bad_index = jetty_num; + if (ret != 0) { + ubcore_log_err( + "driver failed to destroy jetty batch, index: %d.\n", + *bad_jetty_index); + if (ret == -EINVAL) + bad_index = 0; + else + bad_index = *bad_jetty_index; + if (bad_index >= jetty_num) { + ubcore_log_err( + "driver return bad_jetty_index %d out of range, jetty_num is %d.\n", + *bad_jetty_index, jetty_num); + *bad_jetty_index = 0; + bad_index = jetty_num; + ret = -EFAULT; + } + for (i = bad_index; i < jetty_num; ++i) + kref_init(&jetty_arr[i]->ref_cnt); + } + + /* Do not dereference jetty in jetty_arr, as it might be released */ + for (i = 0; i < bad_index; ++i) { + if (send_jfc[i]) + atomic_dec(&send_jfc[i]->use_cnt); + if (recv_jfc[i]) + atomic_dec(&recv_jfc[i]->use_cnt); + if (jfr[i]) + atomic_dec(&jfr[i]->use_cnt); + } + + kfree(jfr); + kfree(recv_jfc); + kfree(send_jfc); + + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jetty_batch); + +int ubcore_flush_jetty(struct ubcore_jetty *jetty, int cr_cnt, + struct ubcore_cr *cr) +{ + if (jetty == NULL || jetty->ub_dev == NULL || + jetty->ub_dev->ops == NULL || + jetty->ub_dev->ops->flush_jetty == NULL || cr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + return jetty->ub_dev->ops->flush_jetty(jetty, cr_cnt, cr); +} +EXPORT_SYMBOL(ubcore_flush_jetty); + +struct ubcore_tjetty *ubcore_import_jetty(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_vtp_param vtp_param = { 0 }; + struct ubcore_vtpn *vtpn = NULL; + struct ubcore_tjetty *tjetty; + + if (!ubcore_have_ops(dev) || dev->ops->unimport_jetty == NULL || + cfg == NULL || dev->attr.dev_cap.max_eid_cnt <= cfg->eid_index) + return ERR_PTR(-EINVAL); + + if (ubcore_check_ctrlplane_compat(dev->ops->import_jetty)) + return ubcore_import_jetty_compat(dev, cfg, udata); + + if (ubcore_is_bonding_dev(dev)) { + if (ubcore_connect_exchange_udata_when_import_jetty( + cfg, udata, false) != 0) { + ubcore_log_err( + "Failed to exchange udata when import jetty\n"); + return ERR_PTR(-ENOEXEC); + } + } + + tjetty = dev->ops->import_jetty(dev, cfg, udata); + if (IS_ERR_OR_NULL(tjetty)) { + ubcore_log_err("UBEP failed to import jetty, jetty_id:%u.\n", + cfg->id.id); + return UBCORE_CHECK_RETURN_ERR_PTR(tjetty, ENOEXEC); + } + tjetty->cfg = *cfg; + tjetty->ub_dev = dev; + tjetty->uctx = ubcore_get_uctx(udata); + + atomic_set(&tjetty->use_cnt, 0); + mutex_init(&tjetty->lock); + + /* create rm tp if the remote eid is not connected */ + if (!ubcore_is_bonding_dev(dev) && + dev->transport_type == UBCORE_TRANSPORT_UB && + (cfg->trans_mode == UBCORE_TP_RM || + cfg->trans_mode == 
UBCORE_TP_UM || + is_create_rc_shared_tp(cfg->trans_mode, cfg->flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp))) { + ubcore_set_vtp_param(dev, NULL, cfg, &vtp_param); + mutex_lock(&tjetty->lock); + vtpn = ubcore_connect_vtp(dev, &vtp_param); + if (IS_ERR_OR_NULL(vtpn)) { + mutex_unlock(&tjetty->lock); + mutex_destroy(&tjetty->lock); + (void)dev->ops->unimport_jetty(tjetty); + ubcore_log_err("Failed to setup tp connection.\n"); + if (vtpn == NULL) + return ERR_PTR(-ECONNREFUSED); + return (void *)vtpn; + } + tjetty->vtpn = vtpn; + mutex_unlock(&tjetty->lock); + } else { + tjetty->tp = NULL; + } + + return tjetty; +} +EXPORT_SYMBOL(ubcore_import_jetty); + +struct ubcore_tjetty * +ubcore_import_jetty_ex(struct ubcore_device *dev, struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata) +{ + struct ubcore_vtp_param vtp_param = { 0 }; + struct ubcore_vtpn *vtpn = NULL; + struct ubcore_tjetty *tjetty; + + if (dev == NULL || dev->ops == NULL || + dev->ops->import_jetty_ex == NULL || + dev->ops->unimport_jetty == NULL || cfg == NULL || + active_tp_cfg == NULL || + dev->attr.dev_cap.max_eid_cnt <= cfg->eid_index) + return ERR_PTR(-EINVAL); + + tjetty = dev->ops->import_jetty_ex(dev, cfg, active_tp_cfg, udata); + if (IS_ERR_OR_NULL(tjetty)) { + ubcore_log_err("UBEP failed to import jetty, jetty_id:%u.\n", + cfg->id.id); + return UBCORE_CHECK_RETURN_ERR_PTR(tjetty, ENOEXEC); + } + tjetty->cfg = *cfg; + tjetty->ub_dev = dev; + tjetty->uctx = ubcore_get_uctx(udata); + + atomic_set(&tjetty->use_cnt, 0); + mutex_init(&tjetty->lock); + + /* create rm tp if the remote eid is not connected */ + if (dev->transport_type == UBCORE_TRANSPORT_UB && + (cfg->trans_mode == UBCORE_TP_RM || + cfg->trans_mode == UBCORE_TP_UM || + is_create_rc_shared_tp(cfg->trans_mode, cfg->flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp))) { + ubcore_set_vtp_param(dev, NULL, cfg, &vtp_param); + mutex_lock(&tjetty->lock); + vtpn = ubcore_connect_vtp_ctrlplane(dev, &vtp_param, + active_tp_cfg, udata); + if (IS_ERR_OR_NULL(vtpn)) { + mutex_unlock(&tjetty->lock); + mutex_destroy(&tjetty->lock); + (void)dev->ops->unimport_jetty(tjetty); + ubcore_log_err("Failed to setup tp connection.\n"); + if (vtpn == NULL) + return ERR_PTR(-ECONNREFUSED); + return (void *)vtpn; + } + tjetty->vtpn = vtpn; + mutex_unlock(&tjetty->lock); + } else { + tjetty->tp = NULL; + } + + return tjetty; +} +EXPORT_SYMBOL(ubcore_import_jetty_ex); + +int ubcore_unimport_jetty(struct ubcore_tjetty *tjetty) +{ + struct ubcore_device *dev; + int ret; + + if (tjetty == NULL || tjetty->ub_dev == NULL || + tjetty->ub_dev->ops == NULL || + tjetty->ub_dev->ops->unimport_jetty == NULL || + !ubcore_have_ops(tjetty->ub_dev)) + return -EINVAL; + + dev = tjetty->ub_dev; + + if (!ubcore_is_bonding_dev(dev) && + dev->transport_type == UBCORE_TRANSPORT_UB && + (tjetty->cfg.trans_mode == UBCORE_TP_RM || + tjetty->cfg.trans_mode == UBCORE_TP_UM || + is_create_rc_shared_tp(tjetty->cfg.trans_mode, + tjetty->cfg.flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp)) && + tjetty->vtpn != NULL) { + mutex_lock(&tjetty->lock); + ret = ubcore_disconnect_vtp(tjetty->vtpn); + if (ret != 0) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Failed to disconnect vtp.\n"); + return ret; + } + tjetty->vtpn = NULL; + mutex_unlock(&tjetty->lock); + } + + if (tjetty->cfg.trans_mode == UBCORE_TP_RC && + atomic_read(&tjetty->use_cnt)) + return -EBUSY; + + mutex_destroy(&tjetty->lock); + + return dev->ops->unimport_jetty(tjetty); +} 
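+/*
+ * Usage sketch (illustrative only, not part of this patch): a kernel ULP is
+ * expected to pair ubcore_import_jetty() with ubcore_unimport_jetty() roughly
+ * as below. "remote_id" is a placeholder for the peer jetty id obtained out
+ * of band; error handling is abbreviated and a NULL udata is assumed to be
+ * acceptable for kernel-mode callers.
+ *
+ *	struct ubcore_tjetty_cfg tcfg = { 0 };
+ *	struct ubcore_tjetty *tjetty;
+ *
+ *	tcfg.id = remote_id;
+ *	tcfg.trans_mode = UBCORE_TP_RM;
+ *	tcfg.eid_index = 0;
+ *
+ *	tjetty = ubcore_import_jetty(dev, &tcfg, NULL);
+ *	if (IS_ERR(tjetty))
+ *		return PTR_ERR(tjetty);
+ *	...
+ *	(void)ubcore_unimport_jetty(tjetty);
+ */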
+EXPORT_SYMBOL(ubcore_unimport_jetty); + +static int ubcore_advice_jfs_tjfr(struct ubcore_tp_advice *advice, + struct ubcore_jfs *jfs, + struct ubcore_tjetty *tjfr) +{ + (void)memset(advice, 0, sizeof(struct ubcore_tp_advice)); + advice->meta.ht = ubcore_get_tptable(jfs->tptable); + if (advice->meta.ht == NULL) { + ubcore_log_err("tp table has already been destroyed"); + return -1; + } + + advice->ta.type = UBCORE_TA_JFS_TJFR; + advice->ta.jfs = jfs; + advice->ta.tjetty_id = tjfr->cfg.id; + + ubcore_init_tp_key_jetty_id(&advice->meta.key, &tjfr->cfg.id); + advice->meta.hash = ubcore_get_jetty_hash(&tjfr->cfg.id); + return 0; +} + +static int ubcore_advice_jetty_tjetty(struct ubcore_tp_advice *advice, + struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty) +{ + (void)memset(advice, 0, sizeof(struct ubcore_tp_advice)); + advice->meta.ht = ubcore_get_tptable(jetty->tptable); + if (advice->meta.ht == NULL) { + ubcore_log_err("tp table has already been destroyed"); + return -1; + } + + advice->ta.type = UBCORE_TA_JETTY_TJETTY; + advice->ta.jetty = jetty; + advice->ta.tjetty_id = tjetty->cfg.id; + + ubcore_init_tp_key_jetty_id(&advice->meta.key, &tjetty->cfg.id); + advice->meta.hash = ubcore_get_jetty_hash(&tjetty->cfg.id); + return 0; +} + +static inline void ubcore_put_advice(struct ubcore_tp_advice *advice) +{ + ubcore_put_tptable(advice->meta.ht); +} + +int ubcore_advise_jfr(struct ubcore_jfs *jfs, struct ubcore_tjetty *tjfr, + struct ubcore_udata *udata) +{ + struct ubcore_tp_advice advice; + int ret; + + if (jfs == NULL || tjfr == NULL || !ubcore_have_tp_ops(jfs->ub_dev)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + + if (jfs->ub_dev->transport_type == UBCORE_TRANSPORT_UB) + return 0; + + ret = ubcore_advice_jfs_tjfr(&advice, jfs, tjfr); + if (ret != 0) + return ret; + + ret = ubcore_advise_tp(jfs->ub_dev, &tjfr->cfg.id.eid, &advice, udata); + ubcore_put_advice(&advice); + return ret; +} +EXPORT_SYMBOL(ubcore_advise_jfr); + +int ubcore_unadvise_jfr(struct ubcore_jfs *jfs, struct ubcore_tjetty *tjfr) +{ + struct ubcore_tp_advice advice; + int ret; + + if (jfs == NULL || tjfr == NULL || !ubcore_have_tp_ops(jfs->ub_dev)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + + if (jfs->ub_dev->transport_type == UBCORE_TRANSPORT_UB) + return 0; + + ret = ubcore_advice_jfs_tjfr(&advice, jfs, tjfr); + if (ret != 0) + return ret; + + ret = ubcore_unadvise_tp(jfs->ub_dev, &advice); + ubcore_put_advice(&advice); + return ret; +} +EXPORT_SYMBOL(ubcore_unadvise_jfr); + +int ubcore_advise_jetty(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata) +{ + struct ubcore_tp_advice advice; + int ret; + + if (jetty == NULL || tjetty == NULL || + !ubcore_have_tp_ops(jetty->ub_dev)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + + if (jetty->ub_dev->transport_type == UBCORE_TRANSPORT_UB) + return 0; + + ret = ubcore_advice_jetty_tjetty(&advice, jetty, tjetty); + if (ret != 0) + return ret; + + ret = ubcore_advise_tp(jetty->ub_dev, &tjetty->cfg.id.eid, &advice, + udata); + ubcore_put_advice(&advice); + return ret; +} +EXPORT_SYMBOL(ubcore_advise_jetty); + +int ubcore_unadvise_jetty(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty) +{ + struct ubcore_tp_advice advice; + int ret; + + if (jetty == NULL || tjetty == NULL || + !ubcore_have_tp_ops(jetty->ub_dev)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + + if (jetty->ub_dev->transport_type == UBCORE_TRANSPORT_UB) + return 0; + + ret = 
ubcore_advice_jetty_tjetty(&advice, jetty, tjetty); + if (ret != 0) + return ret; + + ret = ubcore_unadvise_tp(jetty->ub_dev, &advice); + ubcore_put_advice(&advice); + return ret; +} +EXPORT_SYMBOL(ubcore_unadvise_jetty); + +static int ubcore_inner_bind_ub_jetty(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata) +{ + struct ubcore_vtp_param vtp_param = { 0 }; + struct ubcore_device *dev; + struct ubcore_vtpn *vtpn; + int ret; + + dev = jetty->ub_dev; + + if (dev->ops == NULL || dev->ops->unbind_jetty == NULL) { + ubcore_log_err("Failed to bind jetty, no ops->bind_jetty\n"); + return -1; + } + + if (ubcore_check_ctrlplane_compat(dev->ops->bind_jetty)) + return ubcore_bind_jetty_compat(jetty, tjetty, udata); + + ret = dev->ops->bind_jetty(jetty, tjetty, udata); + if (ret != 0) { + ubcore_log_err("Failed to bind jetty"); + return ret; + } + atomic_inc(&jetty->use_cnt); + + if (!is_create_rc_shared_tp(jetty->jetty_cfg.trans_mode, + jetty->jetty_cfg.flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp)) { + ubcore_set_vtp_param(dev, jetty, &tjetty->cfg, &vtp_param); + mutex_lock(&tjetty->lock); + + if (tjetty->vtpn != NULL) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Duplicate bind\n"); + ret = -EEXIST; + goto unbind; + } + vtpn = ubcore_connect_vtp(dev, &vtp_param); + if (IS_ERR_OR_NULL(vtpn)) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Failed to setup vtp connection.\n"); + ret = -1; + if (vtpn != NULL) + ret = PTR_ERR(vtpn); + goto unbind; + } + tjetty->vtpn = vtpn; + mutex_unlock(&tjetty->lock); + } + return 0; + +unbind: + if (dev->ops->bind_jetty != NULL && dev->ops->unbind_jetty != NULL) { + (void)dev->ops->unbind_jetty(jetty); + atomic_dec(&jetty->use_cnt); + } + return ret; +} + +static int ubcore_inner_bind_jetty(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata) +{ + struct ubcore_device *dev; + int ret; + + dev = jetty->ub_dev; + if (dev == NULL) { + ubcore_log_err("Invalid parameter with dev null_ptr.\n"); + return -1; + } + + if (dev->attr.dev_cap.max_eid_cnt <= tjetty->cfg.eid_index) { + ubcore_log_err("eid_index:%u is beyond the max_eid_cnt:%u.\n", + tjetty->cfg.eid_index, + dev->attr.dev_cap.max_eid_cnt); + return -EINVAL; + } + + if (dev->transport_type == UBCORE_TRANSPORT_UB) { + ret = ubcore_inner_bind_ub_jetty(jetty, tjetty, udata); + if (ret != 0) + return ret; + } else { + atomic_inc(&jetty->use_cnt); + } + ubcore_log_info_rl("jetty: %u bind tjetty: %u\n", jetty->jetty_id.id, + tjetty->cfg.id.id); + jetty->remote_jetty = tjetty; + atomic_inc(&tjetty->use_cnt); + return 0; +} + +int ubcore_bind_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata) +{ + if (jetty == NULL || tjetty == NULL || + !ubcore_have_ops(jetty->ub_dev)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + if ((jetty->jetty_cfg.trans_mode != UBCORE_TP_RC) || + (tjetty->cfg.trans_mode != UBCORE_TP_RC)) { + ubcore_log_err("trans mode is not rc type.\n"); + return -1; + } + if (jetty->remote_jetty == tjetty) { + ubcore_log_info("bind reentry, jetty: %u bind tjetty: %u\n", + jetty->jetty_id.id, tjetty->cfg.id.id); + return 0; + } + if (jetty->remote_jetty != NULL) { + ubcore_log_err( + "The same jetty, different tjetty, prevent duplicate bind.\n"); + return -1; + } + + if (tjetty->vtpn != NULL && + (!is_create_rc_shared_tp(tjetty->cfg.trans_mode, + tjetty->cfg.flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp))) { + ubcore_log_err( + "The tjetty, has already connect 
vtpn, prevent duplicate bind.\n"); + return -1; + } + + return ubcore_inner_bind_jetty(jetty, tjetty, udata); +} +EXPORT_SYMBOL(ubcore_bind_jetty); + +static bool +ubcore_check_tp_handle_available(struct ubcore_device *dev, + struct ubcore_active_tp_cfg *active_tp_cfg) +{ + struct ubcore_vtpn *vtpn; + + vtpn = ubcore_find_get_vtpn_ctrlplane(dev, active_tp_cfg); + if (vtpn != NULL) { + ubcore_log_err( + "Invalid operation with tp_handle: %llu already used.\n", + active_tp_cfg->tp_handle.value); + ubcore_vtpn_kref_put(vtpn); + return false; + } + return true; +} + +static int ubcore_inner_bind_ub_jetty_ctrlplane( + struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_active_tp_cfg *active_tp_cfg, struct ubcore_udata *udata) +{ + struct ubcore_vtp_param vtp_param = { 0 }; + struct ubcore_device *dev; + struct ubcore_vtpn *vtpn; + int ret; + + dev = jetty->ub_dev; + if (dev->ops == NULL || dev->ops->bind_jetty_ex == NULL || + dev->ops->unbind_jetty == NULL) { + ubcore_log_err( + "Failed to bind jetty, no ops->bind_jetty_ex.\n"); + return -1; + } + + if (!ubcore_check_tp_handle_available(dev, active_tp_cfg)) { + ubcore_log_err("Invalid tp_handle: %llu.\n", + active_tp_cfg->tp_handle.value); + return -EINVAL; + } + + ret = dev->ops->bind_jetty_ex(jetty, tjetty, active_tp_cfg, udata); + if (ret != 0) { + ubcore_log_err("Failed to bind jetty.\n"); + return ret; + } + atomic_inc(&jetty->use_cnt); + + if (!is_create_rc_shared_tp(jetty->jetty_cfg.trans_mode, + jetty->jetty_cfg.flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp)) { + ubcore_set_vtp_param(dev, jetty, &tjetty->cfg, &vtp_param); + mutex_lock(&tjetty->lock); + + if (tjetty->vtpn != NULL) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Duplicate bind\n"); + ret = -EEXIST; + goto unbind; + } + vtpn = ubcore_connect_vtp_ctrlplane(dev, &vtp_param, + active_tp_cfg, udata); + if (IS_ERR_OR_NULL(vtpn)) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Failed to setup vtp connection.\n"); + ret = -1; + if (vtpn != NULL) + ret = PTR_ERR(vtpn); + goto unbind; + } + tjetty->vtpn = vtpn; + mutex_unlock(&tjetty->lock); + } + return 0; + +unbind: + if (dev->ops->bind_jetty_ex != NULL && dev->ops->unbind_jetty != NULL) { + (void)dev->ops->unbind_jetty(jetty); + atomic_dec(&jetty->use_cnt); + } + return ret; +} + +static int ubcore_inner_bind_jetty_ctrlplane( + struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_active_tp_cfg *active_tp_cfg, struct ubcore_udata *udata) +{ + struct ubcore_device *dev; + int ret; + + dev = jetty->ub_dev; + if (dev->attr.dev_cap.max_eid_cnt <= tjetty->cfg.eid_index) { + ubcore_log_err("eid_index:%u is beyond the max_eid_cnt:%u.\n", + tjetty->cfg.eid_index, + dev->attr.dev_cap.max_eid_cnt); + return -EINVAL; + } + + if (dev->transport_type != UBCORE_TRANSPORT_UB) { + ubcore_log_err("Invalid transport_type: %d.\n", + dev->transport_type); + return -EINVAL; + } + + ret = ubcore_inner_bind_ub_jetty_ctrlplane(jetty, tjetty, active_tp_cfg, + udata); + if (ret != 0) + return ret; + + ubcore_log_info_rl("jetty: %u bind tjetty: %u\n", jetty->jetty_id.id, + tjetty->cfg.id.id); + jetty->remote_jetty = tjetty; + atomic_inc(&tjetty->use_cnt); + return 0; +} + +int ubcore_bind_jetty_ex(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata) +{ + if (jetty == NULL || tjetty == NULL || jetty->ub_dev == NULL || + jetty->ub_dev->ops == NULL || active_tp_cfg == NULL) { + ubcore_log_err("Invalid parameter.\n"); + 
return -1; + } + if ((jetty->jetty_cfg.trans_mode != UBCORE_TP_RC) || + (tjetty->cfg.trans_mode != UBCORE_TP_RC)) { + ubcore_log_err("trans mode is not rc type.\n"); + return -1; + } + if (jetty->remote_jetty == tjetty) { + ubcore_log_info("bind reentry, jetty: %u bind tjetty: %u.\n", + jetty->jetty_id.id, tjetty->cfg.id.id); + return 0; + } + if (jetty->remote_jetty != NULL) { + ubcore_log_err( + "The same jetty, different tjetty, prevent duplicate bind.\n"); + return -1; + } + + if (tjetty->vtpn != NULL && + (!is_create_rc_shared_tp(tjetty->cfg.trans_mode, + tjetty->cfg.flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp))) { + ubcore_log_err( + "The tjetty, has already connect vtpn, prevent duplicate bind.\n"); + return -1; + } + + return ubcore_inner_bind_jetty_ctrlplane(jetty, tjetty, active_tp_cfg, + udata); +} +EXPORT_SYMBOL(ubcore_bind_jetty_ex); + +static int ubcore_inner_unbind_ub_jetty(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty) +{ + int ret; + + if (tjetty->vtpn != NULL) { + if (!is_create_rc_shared_tp(jetty->jetty_cfg.trans_mode, + jetty->jetty_cfg.flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp)) { + mutex_lock(&tjetty->lock); + ret = ubcore_disconnect_vtp(tjetty->vtpn); + if (ret != 0) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Failed to disconnect vtp.\n"); + return ret; + } + tjetty->vtpn = NULL; + mutex_unlock(&tjetty->lock); + } + } + return 0; +} + +int ubcore_unbind_jetty(struct ubcore_jetty *jetty) +{ + struct ubcore_tjetty *tjetty; + struct ubcore_device *dev; + int ret; + + if (jetty == NULL || jetty->ub_dev == NULL) { + ubcore_log_err("invalid parameter.\n"); + return -EINVAL; + } + tjetty = jetty->remote_jetty; + if ((jetty->jetty_cfg.trans_mode != UBCORE_TP_RC) || tjetty == NULL || + (tjetty->cfg.trans_mode != UBCORE_TP_RC)) { + ubcore_log_err("trans mode is not rc type.\n"); + return -EINVAL; + } + + dev = jetty->ub_dev; + + if (dev->transport_type == UBCORE_TRANSPORT_UB) { + ret = ubcore_inner_unbind_ub_jetty(jetty, tjetty); + if (ret != 0) + return ret; + } + + ubcore_log_info_rl("jetty: %u unbind tjetty: %u\n", jetty->jetty_id.id, + tjetty->cfg.id.id); + + if (dev->transport_type == UBCORE_TRANSPORT_UB) { + if (dev->ops == NULL || dev->ops->unbind_jetty == NULL) { + ubcore_log_err( + "Failed to unbind jetty, no ops->unbind_jetty\n"); + return -1; + } + ret = dev->ops->unbind_jetty(jetty); + if (ret != 0) { + ubcore_log_err("Failed to unbind jetty"); + return ret; + } + } + + jetty->remote_jetty = NULL; + atomic_dec(&tjetty->use_cnt); + atomic_dec(&jetty->use_cnt); + return 0; +} +EXPORT_SYMBOL(ubcore_unbind_jetty); + +struct ubcore_jetty *ubcore_find_jetty(struct ubcore_device *dev, + uint32_t jetty_id) +{ + if (dev == NULL) { + ubcore_log_err("invalid parameter.\n"); + return NULL; + } + + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_JETTY], jetty_id, + &jetty_id); +} +EXPORT_SYMBOL(ubcore_find_jetty); + +struct ubcore_jetty_group *ubcore_create_jetty_grp( + struct ubcore_device *dev, struct ubcore_jetty_grp_cfg *cfg, + ubcore_event_callback_t jfae_handler, struct ubcore_udata *udata) +{ + struct ubcore_jetty_group *jetty_grp; + uint32_t max_jetty_in_jetty_grp; + uint32_t i; + + if (dev == NULL || cfg == NULL || dev->ops == NULL || + dev->ops->create_jetty_grp == NULL || + dev->ops->delete_jetty_grp == NULL || + !ubcore_eid_valid(dev, cfg->eid_index, udata)) + return ERR_PTR(-EINVAL); + + max_jetty_in_jetty_grp = dev->attr.dev_cap.max_jetty_in_jetty_grp; + if (max_jetty_in_jetty_grp == 0 || + max_jetty_in_jetty_grp > 
UBCORE_MAX_JETTY_IN_JETTY_GRP) { + ubcore_log_err( + "max_jetty_in_jetty_grp %u is err, range is 1 to %u.\n", + max_jetty_in_jetty_grp, UBCORE_MAX_JETTY_IN_JETTY_GRP); + return ERR_PTR(-EINVAL); + } + + jetty_grp = dev->ops->create_jetty_grp( + dev, (struct ubcore_jetty_grp_cfg *)cfg, udata); + if (IS_ERR_OR_NULL(jetty_grp)) { + ubcore_log_err("failed to create jetty_grp.\n"); + return UBCORE_CHECK_RETURN_ERR_PTR(jetty_grp, ENOSPC); + } + + jetty_grp->jetty = + kzalloc(sizeof(struct ubcore_jetty *) * max_jetty_in_jetty_grp, + GFP_KERNEL); + if (jetty_grp->jetty == NULL) { + (void)dev->ops->delete_jetty_grp(jetty_grp); + ubcore_log_err("Failed to alloc jetty array.\n"); + return ERR_PTR(-ENOMEM); + } + + jetty_grp->ub_dev = dev; + jetty_grp->jetty_grp_cfg = *cfg; + jetty_grp->jfae_handler = jfae_handler; + jetty_grp->uctx = ubcore_get_uctx(udata); + jetty_grp->jetty_grp_id.eid = + dev->eid_table.eid_entries[cfg->eid_index].eid; + mutex_init(&jetty_grp->lock); + jetty_grp->jetty_cnt = 0; + for (i = 0; i < max_jetty_in_jetty_grp; i++) + jetty_grp->jetty[i] = NULL; + + return jetty_grp; +} +EXPORT_SYMBOL(ubcore_create_jetty_grp); + +int ubcore_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp) +{ + struct ubcore_device *dev; + uint32_t jetty_grp_id; + int ret; + + if (jetty_grp == NULL || jetty_grp->ub_dev == NULL || + jetty_grp->ub_dev->ops == NULL || + jetty_grp->ub_dev->ops->delete_jetty_grp == NULL) + return -EINVAL; + + jetty_grp_id = jetty_grp->jetty_grp_id.id; + dev = jetty_grp->ub_dev; + + mutex_lock(&jetty_grp->lock); + if (jetty_grp->jetty_cnt > 0) { + mutex_unlock(&jetty_grp->lock); + ubcore_log_err("jetty_grp->jetty_cnt: %u.\n", + jetty_grp->jetty_cnt); + return -EBUSY; + } + if (jetty_grp->jetty != NULL) { + kfree(jetty_grp->jetty); + jetty_grp->jetty = NULL; + } + mutex_unlock(&jetty_grp->lock); + mutex_destroy(&jetty_grp->lock); + + ret = dev->ops->delete_jetty_grp(jetty_grp); + if (ret != 0) + ubcore_log_err( + "UBEP failed to destroy jetty_grp, jetty_grp_id:%u.\n", + jetty_grp_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_delete_jetty_grp); + +struct ubcore_tjetty *ubcore_import_jetty_async(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + int timeout, + struct ubcore_import_cb *cb, + struct ubcore_udata *udata) +{ + struct ubcore_vtp_param vtp_param = { 0 }; + struct ubcore_vtpn *vtpn = NULL; + struct ubcore_tjetty *tjetty; + struct ubcore_vtpn_cb_para para = { 0 }; + + if (!ubcore_have_ops(dev) || dev->ops->import_jetty == NULL || + dev->ops->unimport_jetty == NULL || cfg == NULL || + dev->attr.dev_cap.max_eid_cnt <= cfg->eid_index || cb == NULL) + return ERR_PTR(-EINVAL); + + tjetty = dev->ops->import_jetty(dev, cfg, udata); + if (IS_ERR_OR_NULL(tjetty)) { + ubcore_log_err( + "UBEP failed to import jetty async, jetty_id:%u.\n", + cfg->id.id); + return UBCORE_CHECK_RETURN_ERR_PTR(tjetty, ENOEXEC); + } + tjetty->cfg = *cfg; + tjetty->ub_dev = dev; + tjetty->uctx = ubcore_get_uctx(udata); + + atomic_set(&tjetty->use_cnt, 0); + mutex_init(&tjetty->lock); + + para.type = UBCORE_IMPORT_JETTY_VTPN; + para.tjetty = tjetty; + para.import_cb = cb; + + /* create rm tp if the remote eid is not connected */ + if (dev->transport_type == UBCORE_TRANSPORT_UB && + (cfg->trans_mode == UBCORE_TP_RM || + cfg->trans_mode == UBCORE_TP_UM || + is_create_rc_shared_tp(cfg->trans_mode, cfg->flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp))) { + ubcore_set_vtp_param(dev, NULL, cfg, &vtp_param); + mutex_lock(&tjetty->lock); + vtpn = ubcore_connect_vtp_async(dev, &vtp_param, timeout, 
+ &para); + if (IS_ERR_OR_NULL(vtpn)) { + mutex_unlock(&tjetty->lock); + mutex_destroy(&tjetty->lock); + (void)dev->ops->unimport_jetty(tjetty); + ubcore_log_err( + "Failed to setup asynchronous tp connection.\n"); + if (vtpn == NULL) + return ERR_PTR(-ECONNREFUSED); + return (void *)vtpn; + } + tjetty->vtpn = vtpn; + mutex_unlock(&tjetty->lock); + } else { + tjetty->tp = NULL; + cb->callback(tjetty, 0, cb->user_arg); + kfree(cb); + } + + return tjetty; +} +EXPORT_SYMBOL(ubcore_import_jetty_async); + +int ubcore_unimport_jetty_async(struct ubcore_tjetty *tjetty, int timeout, + struct ubcore_unimport_cb *cb) +{ + struct ubcore_device *dev; + struct ubcore_vtpn_cb_para para = { 0 }; + int ret; + + if (tjetty == NULL || tjetty->ub_dev == NULL || + tjetty->ub_dev->ops == NULL || + tjetty->ub_dev->ops->unimport_jetty == NULL || + !ubcore_have_ops(tjetty->ub_dev)) + return -EINVAL; + + dev = tjetty->ub_dev; + para.type = UBCORE_UNIMPORT_JETTY_VTPN; + para.unimport_cb = cb; + + if (dev->transport_type == UBCORE_TRANSPORT_UB && + (tjetty->cfg.trans_mode == UBCORE_TP_RM || + tjetty->cfg.trans_mode == UBCORE_TP_UM || + is_create_rc_shared_tp(tjetty->cfg.trans_mode, + tjetty->cfg.flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp)) && + tjetty->vtpn != NULL) { + mutex_lock(&tjetty->lock); + ret = ubcore_disconnect_vtp_async(tjetty->vtpn, timeout, &para); + if (ret != 0) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Failed to disconnect vtp.\n"); + return ret; + } + + tjetty->vtpn = NULL; + mutex_unlock(&tjetty->lock); + } + + if (tjetty->cfg.trans_mode == UBCORE_TP_RC && + atomic_read(&tjetty->use_cnt)) + return -EBUSY; + + mutex_destroy(&tjetty->lock); + + return dev->ops->unimport_jetty(tjetty); +} +EXPORT_SYMBOL(ubcore_unimport_jetty_async); + +static int ubcore_inner_bind_ub_jetty_async(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + int timeout, + struct ubcore_bind_cb *cb, + struct ubcore_udata *udata) +{ + struct ubcore_vtp_param vtp_param = { 0 }; + struct ubcore_device *dev; + struct ubcore_vtpn *vtpn; + struct ubcore_vtpn_cb_para para = { 0 }; + int ret; + + dev = jetty->ub_dev; + + if (dev->ops == NULL || dev->ops->bind_jetty == NULL || + dev->ops->unbind_jetty == NULL) { + ubcore_log_err( + "Failed to bind jetty async, no bind/unbind ops\n"); + return -1; + } + + ret = dev->ops->bind_jetty(jetty, tjetty, udata); + if (ret != 0) { + ubcore_log_err("Failed to bind jetty async"); + return ret; + } + atomic_inc(&jetty->use_cnt); + + para.type = UBCORE_BIND_JETTY_VTPN; + para.tjetty = tjetty; + para.jetty = jetty; + para.bind_cb = cb; + + if (!is_create_rc_shared_tp(jetty->jetty_cfg.trans_mode, + jetty->jetty_cfg.flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp)) { + ubcore_set_vtp_param(dev, jetty, &tjetty->cfg, &vtp_param); + mutex_lock(&tjetty->lock); + + if (tjetty->vtpn != NULL) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Duplicate bind\n"); + ret = -EEXIST; + goto unbind; + } + vtpn = ubcore_connect_vtp_async(dev, &vtp_param, timeout, + &para); + if (IS_ERR_OR_NULL(vtpn)) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Failed to setup vtp connection.\n"); + ret = -1; + if (vtpn != NULL) + ret = PTR_ERR(vtpn); + goto unbind; + } + tjetty->vtpn = vtpn; + mutex_unlock(&tjetty->lock); + } else { + if (cb != NULL) { + cb->callback(jetty, tjetty, 0, cb->user_arg); + kfree(cb); + } + } + return 0; + +unbind: + if (dev->ops->bind_jetty != NULL && dev->ops->unbind_jetty != NULL) { + (void)dev->ops->unbind_jetty(jetty); + atomic_dec(&jetty->use_cnt); + } + return ret; +} + 
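+/*
+ * Illustrative sketch of the asynchronous bind contract implemented by
+ * ubcore_bind_jetty_async() below (not part of this patch): ubcore takes
+ * over the ubcore_bind_cb object and kfree()s it on the completion paths
+ * visible in this file, so callers are expected to pass a heap-allocated
+ * callback descriptor. The callback prototype is inferred from the
+ * cb->callback(jetty, tjetty, status, user_arg) call sites here;
+ * "my_bind_done" and "done" are placeholders.
+ *
+ *	static void my_bind_done(struct ubcore_jetty *jetty,
+ *				 struct ubcore_tjetty *tjetty,
+ *				 int status, void *user_arg)
+ *	{
+ *		complete((struct completion *)user_arg);
+ *	}
+ *
+ *	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
+ *	if (cb == NULL)
+ *		return -ENOMEM;
+ *	cb->callback = my_bind_done;
+ *	cb->user_arg = &done;
+ *	ret = ubcore_bind_jetty_async(jetty, tjetty, timeout, cb, NULL);
+ */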
+static int ubcore_inner_bind_jetty_async(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + int timeout, struct ubcore_bind_cb *cb, + struct ubcore_udata *udata) +{ + struct ubcore_device *dev; + int ret; + + dev = jetty->ub_dev; + if (dev == NULL) { + ubcore_log_err("Invalid parameter with dev null_ptr.\n"); + return -1; + } + + if (dev->attr.dev_cap.max_eid_cnt <= tjetty->cfg.eid_index) { + ubcore_log_err("eid_index:%u is beyond the max_eid_cnt:%u.\n", + tjetty->cfg.eid_index, + dev->attr.dev_cap.max_eid_cnt); + return -EINVAL; + } + + if (dev->transport_type == UBCORE_TRANSPORT_UB) { + ret = ubcore_inner_bind_ub_jetty_async(jetty, tjetty, timeout, + cb, udata); + if (ret != 0) + return ret; + } + ubcore_log_info_rl("jetty: %u bind tjetty async: %u\n", + jetty->jetty_id.id, tjetty->cfg.id.id); + jetty->remote_jetty = tjetty; + atomic_inc(&tjetty->use_cnt); + return 0; +} + +int ubcore_bind_jetty_async(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, int timeout, + struct ubcore_bind_cb *cb, + struct ubcore_udata *udata) +{ + if (jetty == NULL || tjetty == NULL || cb == NULL || + !ubcore_have_ops(jetty->ub_dev)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + if ((jetty->jetty_cfg.trans_mode != UBCORE_TP_RC) || + (tjetty->cfg.trans_mode != UBCORE_TP_RC)) { + ubcore_log_err("trans mode is not rc type.\n"); + return -1; + } + if (jetty->remote_jetty == tjetty) { + ubcore_log_info("bind reentry, jetty: %u bind tjetty: %u\n", + jetty->jetty_id.id, tjetty->cfg.id.id); + if (cb != NULL) { + cb->callback(jetty, tjetty, 0, cb->user_arg); + kfree(cb); + } + return 0; + } + if (jetty->remote_jetty != NULL) { + ubcore_log_err( + "The same jetty, different tjetty, prevent duplicate bind.\n"); + return -1; + } + if (tjetty->vtpn != NULL && + (!is_create_rc_shared_tp(tjetty->cfg.trans_mode, + tjetty->cfg.flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp))) { + ubcore_log_err( + "The tjetty, has already connect vtpn, prevent duplicate bind.\n"); + return -1; + } + + return ubcore_inner_bind_jetty_async(jetty, tjetty, timeout, cb, udata); +} +EXPORT_SYMBOL(ubcore_bind_jetty_async); + +static int ubcore_inner_unbind_ub_jetty_async(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + int timeout, + struct ubcore_unbind_cb *cb) +{ + struct ubcore_vtpn_cb_para para = { 0 }; + int ret; + + para.type = UBCORE_UNBIND_JETTY_VTPN; + para.unbind_cb = cb; + if (tjetty->vtpn != NULL) { + if (!is_create_rc_shared_tp(jetty->jetty_cfg.trans_mode, + jetty->jetty_cfg.flag.bs.order_type, + tjetty->cfg.flag.bs.share_tp)) { + mutex_lock(&tjetty->lock); + ret = ubcore_disconnect_vtp_async(tjetty->vtpn, timeout, + &para); + if (ret != 0) { + mutex_unlock(&tjetty->lock); + ubcore_log_err("Failed to disconnect vtp.\n"); + return ret; + } + tjetty->vtpn = NULL; + mutex_unlock(&tjetty->lock); + } + } + return 0; +} + +int ubcore_unbind_jetty_async(struct ubcore_jetty *jetty, int timeout, + struct ubcore_unbind_cb *cb) +{ + struct ubcore_tjetty *tjetty; + struct ubcore_device *dev; + int ret; + + if (jetty == NULL || jetty->ub_dev == NULL) { + ubcore_log_err("invalid parameter.\n"); + return -EINVAL; + } + tjetty = jetty->remote_jetty; + if ((jetty->jetty_cfg.trans_mode != UBCORE_TP_RC) || tjetty == NULL || + (tjetty->cfg.trans_mode != UBCORE_TP_RC)) { + ubcore_log_err("trans mode is not rc type.\n"); + return -EINVAL; + } + + dev = jetty->ub_dev; + + if (dev->transport_type == UBCORE_TRANSPORT_UB) { + ret = ubcore_inner_unbind_ub_jetty_async(jetty, tjetty, timeout, + cb); + if (ret != 
0) + return ret; + } + + ubcore_log_info_rl("jetty: %u unbind tjetty async: %u\n", + jetty->jetty_id.id, tjetty->cfg.id.id); + + if (dev->transport_type == UBCORE_TRANSPORT_UB) { + if (dev->ops == NULL || dev->ops->bind_jetty == NULL || + dev->ops->unbind_jetty == NULL) { + ubcore_log_err( + "Failed to unbind jetty, no ops->unbind_jetty\n"); + return -1; + } + ret = dev->ops->unbind_jetty(jetty); + if (ret != 0) { + ubcore_log_err("Failed to unbind jetty"); + return ret; + } + } + + jetty->remote_jetty = NULL; + atomic_dec(&tjetty->use_cnt); + atomic_dec(&jetty->use_cnt); + return 0; +} +EXPORT_SYMBOL(ubcore_unbind_jetty_async); diff --git a/drivers/ub/urma/ubcore/ubcore_log.c b/drivers/ub/urma/ubcore/ubcore_log.c new file mode 100644 index 000000000000..c4f93d2358f9 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_log.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore log file + * Author: Qian Guoxin + * Create: 2024-2-5 + * Note: + * History: 2024-2-5: Create file + */ + +#include +#include "ubcore_log.h" + +uint32_t g_ubcore_log_level = UBCORE_LOG_LEVEL_WARNING; diff --git a/drivers/ub/urma/ubcore/ubcore_log.h b/drivers/ub/urma/ubcore/ubcore_log.h new file mode 100644 index 000000000000..b94d43339062 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_log.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore log head file + * Author: Qian Guoxin + * Create: 2021-8-3 + * Note: + * History: 2021-8-3: Create file + */ + +#ifndef UBCORE_LOG_H +#define UBCORE_LOG_H + +#include +#include + +enum ubcore_log_level { + UBCORE_LOG_LEVEL_EMERG = 0, + UBCORE_LOG_LEVEL_ALERT = 1, + UBCORE_LOG_LEVEL_CRIT = 2, + UBCORE_LOG_LEVEL_ERR = 3, + UBCORE_LOG_LEVEL_WARNING = 4, + UBCORE_LOG_LEVEL_NOTICE = 5, + UBCORE_LOG_LEVEL_INFO = 6, + UBCORE_LOG_LEVEL_DEBUG = 7, + UBCORE_LOG_LEVEL_MAX = 8, +}; + +/* add log head info, "LogTag_UBCORE|function|[line]| */ +#define UBCORE_LOG_TAG "LogTag_UBCORE" +/* only use debug log */ +#define ubcore_log(l, format, args...) \ + pr_##l("%s|%s:[%d]|" format, UBCORE_LOG_TAG, __func__, __LINE__, ##args) +/* use default log, info/warn/err */ +#define ubcore_default_log(l, format, args...) \ + ((void)pr_##l("%s|%s:[%d]|" format, UBCORE_LOG_TAG, __func__, \ + __LINE__, ##args)) + +#define UBCORE_RATELIMIT_INTERVAL (5 * HZ) +#define UBCORE_RATELIMIT_BURST 100 + +extern uint32_t g_ubcore_log_level; + +#define ubcore_log_info(...) 
\ + do { \ + if (g_ubcore_log_level >= UBCORE_LOG_LEVEL_INFO) \ + ubcore_default_log(info, __VA_ARGS__); \ + } while (0) + +#define ubcore_log_err(...) \ + do { \ + if (g_ubcore_log_level >= UBCORE_LOG_LEVEL_ERR) \ + ubcore_default_log(err, __VA_ARGS__); \ + } while (0) + +#define ubcore_log_warn(...) \ + do { \ + if (g_ubcore_log_level >= UBCORE_LOG_LEVEL_WARNING) \ + ubcore_default_log(warn, __VA_ARGS__); \ + } while (0) + +#define ubcore_log_debug(...) \ + do { \ + if (g_ubcore_log_level >= UBCORE_LOG_LEVEL_DEBUG) \ + ubcore_log(debug, __VA_ARGS__); \ + } while (0) + +/* Rate Limited log to avoid soft lockup crash by quantities of printk */ +/* Current limit is 100 log every 5 seconds */ +#define ubcore_log_info_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBCORE_RATELIMIT_INTERVAL, \ + UBCORE_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubcore_log_level >= UBCORE_LOG_LEVEL_INFO)) \ + ubcore_log(info, __VA_ARGS__); \ + } while (0) + +#define ubcore_log_err_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBCORE_RATELIMIT_INTERVAL, \ + UBCORE_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubcore_log_level >= UBCORE_LOG_LEVEL_ERR)) \ + ubcore_log(err, __VA_ARGS__); \ + } while (0) + +#define ubcore_log_warn_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBCORE_RATELIMIT_INTERVAL, \ + UBCORE_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubcore_log_level >= UBCORE_LOG_LEVEL_WARNING)) \ + ubcore_log(warn, __VA_ARGS__); \ + } while (0) + +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_main.c b/drivers/ub/urma/ubcore/ubcore_main.c new file mode 100644 index 000000000000..2617778125e5 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_main.c @@ -0,0 +1,1129 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore kernel module + * Author: Qian Guoxin + * Create: 2021-08-03 + * Note: + * History: 2021-08-03: create file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "net/ubcore_net.h" +#include "ubcore_cmd.h" +#include "ubcore_connect_adapter.h" +#include "ubcore_connect_bonding.h" +#include "ubcore_uvs_cmd.h" +#include "ubcore_log.h" +#include "ubcore_netlink.h" +#include +#include +#include +#include "ubcore_priv.h" +#include "ubcore_netdev.h" +#include "ubcore_msg.h" +#include "ubcore_genl.h" +#include "ubcore_workqueue.h" +#include "ubcore_device.h" +#include "ubcore_uvs.h" +#include "ubcm/ub_cm.h" +#include "net/ubcore_net.h" + +#include "ubcore_main.h" + +#define UBCORE_LOG_FILE_PERMISSION (0644) + +module_param(g_ubcore_log_level, uint, UBCORE_LOG_FILE_PERMISSION); +MODULE_PARM_DESC(g_ubcore_log_level, " 3: ERR, 4: WARNING, 6: INFO, 7: DEBUG"); + +module_param(g_ubcore_connect_type, uint, UBCORE_LOG_FILE_PERMISSION); +MODULE_PARM_DESC(g_ubcore_connect_type, " 0: WK-JETTY, 1: SOCK "); + +/* ubcore create independent cdev and ioctl channels + * to handle public work. 
+ */ +#define UBCORE_IPV4_MAP_IPV6_PREFIX 0x0000ffff +#define UBCORE_LOCAL_SHUNET (0xfe80000000000000ULL) +#define SIP_MTU_BITS_BASE_SHIFT 7 + +static DEFINE_MUTEX(g_ubcore_net_event_lock); + +struct ubcore_net_addr_node { + struct list_head node; + struct ubcore_net_addr addr; +}; + +enum ubcore_sip_op_type { + UBCORE_SIP_DEL = 0, + UBCORE_SIP_ADD, + UBCORE_SIP_UPDATE +}; + +struct ubcore_notify_uvs_sip_event_work { + struct work_struct work; + struct ubcore_device *mue_dev; + struct ubcore_sip_info new_sip; + struct ubcore_sip_info old_sip; + enum ubcore_sip_op_type sip_op; + uint32_t index; +}; + +struct ubcore_version { + uint32_t version; /* UBCORE_INVALID_VERSION: not negotiated yet. */ + uint32_t cap; /* Currently, not used. */ +}; + +static struct ubcore_version g_ubcore_version = { UBCORE_INVALID_VERSION, 0 }; +/* Versions should be in decending order. */ +static uint32_t g_ubcore_support_versions[UBCORE_SUPPORT_VERION_NUM] = { + UBCORE_VERSION0 +}; + +bool ubcore_negotiated(void) +{ + return g_ubcore_version.version != UBCORE_INVALID_VERSION; +} + +uint32_t ubcore_get_version(void) +{ + return g_ubcore_version.version; +} + +void ubcore_set_version(uint32_t version) +{ + g_ubcore_version.version = version; +} + +uint32_t ubcore_get_cap(void) +{ + return g_ubcore_version.cap; +} + +void ubcore_set_cap(uint32_t cap) +{ + g_ubcore_version.cap = cap; +} + +uint32_t *ubcore_get_support_versions(void) +{ + return g_ubcore_support_versions; +} + +static void ubcore_ipv4_to_netaddr(struct ubcore_net_addr *netaddr, __be32 ipv4) +{ + netaddr->net_addr.in4.reserved1 = 0; + netaddr->net_addr.in4.reserved2 = htonl(UBCORE_IPV4_MAP_IPV6_PREFIX); + netaddr->net_addr.in4.addr = ipv4; +} + +static inline uint32_t sip_mtu_enum_to_int(enum ubcore_mtu mtu) +{ + return (uint32_t)(1 << ((uint32_t)mtu + SIP_MTU_BITS_BASE_SHIFT)); +} + +static enum ubcore_mtu sip_get_mtu(uint32_t mtu) +{ + if (mtu >= sip_mtu_enum_to_int(UBCORE_MTU_8192)) + return UBCORE_MTU_8192; + else if (mtu >= sip_mtu_enum_to_int(UBCORE_MTU_4096)) + return UBCORE_MTU_4096; + else if (mtu >= sip_mtu_enum_to_int(UBCORE_MTU_2048)) + return UBCORE_MTU_2048; + else if (mtu >= sip_mtu_enum_to_int(UBCORE_MTU_1024)) + return UBCORE_MTU_1024; + else if (mtu >= sip_mtu_enum_to_int(UBCORE_MTU_512)) + return UBCORE_MTU_512; + else if (mtu >= sip_mtu_enum_to_int(UBCORE_MTU_256)) + return UBCORE_MTU_256; + else + return (enum ubcore_mtu)0; +} + +static enum ubcore_mtu sip_get_mtu_with_ub(uint32_t mtu) +{ + if (mtu >= sip_mtu_enum_to_int(UBCORE_MTU_8192)) + return UBCORE_MTU_8192; + else if (mtu >= sip_mtu_enum_to_int(UBCORE_MTU_4096)) + return UBCORE_MTU_4096; + else if (mtu >= sip_mtu_enum_to_int(UBCORE_MTU_1024)) + return UBCORE_MTU_1024; + else + return (enum ubcore_mtu)0; +} + +static void ubcore_sip_init(struct ubcore_sip_info *sip, + struct ubcore_device *dev, + const struct ubcore_net_addr *netaddr, + struct net_device *netdev) +{ + memcpy(sip->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME); + memcpy(&sip->addr, netaddr, sizeof(struct ubcore_net_addr)); + + if (netdev) { + ubcore_fill_port_netdev(dev, netdev, sip->port_id, + &sip->port_cnt); + sip->mtu = dev->transport_type == UBCORE_TRANSPORT_UB ? 
+ (uint32_t)sip_get_mtu_with_ub(netdev->mtu) : + (uint32_t)sip_get_mtu(netdev->mtu); + memcpy(sip->netdev_name, netdev_name(netdev), + UBCORE_MAX_DEV_NAME); + } else { + /* mtu is 4k for UBC scenario */ + sip->mtu = UBCORE_MTU_4096; + } +} + +static void ubcore_notify_uvs_update_sip(struct ubcore_device *mue_dev, + struct ubcore_sip_info *new_sip, + struct ubcore_sip_info *old_sip, + uint32_t index) +{ + (void)ubcore_notify_uvs_del_sip(mue_dev, old_sip, index); + (void)ubcore_notify_uvs_add_sip(mue_dev, new_sip, index); +} + +static bool ubcore_notify_uvs_update_sip_sync(struct ubcore_device *mue_dev, + struct ubcore_sip_info *new_sip, + struct ubcore_sip_info *old_sip, + enum ubcore_sip_op_type sip_op, + uint32_t index) +{ + if (ubcore_get_netlink_valid() != true) + return true; + + switch (sip_op) { + case UBCORE_SIP_DEL: + (void)ubcore_notify_uvs_del_sip(mue_dev, old_sip, index); + return true; + case UBCORE_SIP_ADD: + (void)ubcore_notify_uvs_add_sip(mue_dev, new_sip, index); + return true; + case UBCORE_SIP_UPDATE: + ubcore_notify_uvs_update_sip(mue_dev, new_sip, old_sip, index); + return true; + default: + ubcore_log_err("sip_op_type out of range"); + return false; + } +} + +static void ubcore_notify_uvs_update_sip_task(struct work_struct *work) +{ + struct ubcore_notify_uvs_sip_event_work *l_work = container_of( + work, struct ubcore_notify_uvs_sip_event_work, work); + + (void)ubcore_notify_uvs_update_sip_sync(l_work->mue_dev, + &l_work->new_sip, + &l_work->old_sip, + l_work->sip_op, l_work->index); + kfree(l_work); +} + +static int ubcore_notify_uvs_update_sip_async(struct ubcore_device *mue_dev, + struct ubcore_sip_info *new_sip, + struct ubcore_sip_info *old_sip, + enum ubcore_sip_op_type sip_op, + uint32_t index) +{ + struct ubcore_notify_uvs_sip_event_work *work; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return -ENOMEM; + + INIT_WORK(&work->work, ubcore_notify_uvs_update_sip_task); + work->mue_dev = mue_dev; + if (new_sip != NULL) + work->new_sip = *(new_sip); + if (old_sip != NULL) + work->old_sip = *(old_sip); + work->index = index; + work->sip_op = sip_op; + if (ubcore_queue_work((int)UBCORE_SIP_NOTIFY_WQ, &work->work) != 0) { + kfree(work); + ubcore_log_err("Queue work failed"); + return -1; + } + return 0; +} + +static int ubcore_notify_uvs_update_sip_manage(struct ubcore_device *mue_dev, + struct ubcore_sip_info *new_sip, + struct ubcore_sip_info *old_sip, + enum ubcore_sip_op_type sip_op, + uint32_t index, bool async) +{ + int ret = 0; + + if (!async) { + (void)ubcore_notify_uvs_update_sip_sync(mue_dev, new_sip, + old_sip, sip_op, index); + return 0; + } + + ret = ubcore_notify_uvs_update_sip_async(mue_dev, new_sip, old_sip, + sip_op, index); + if (ret != 0) + ubcore_log_err("kzalloc failed or queue type err"); + return ret; +} + +static int ubcore_get_upi(char *dev_name, uint32_t pattern, uint32_t *upi) +{ + if (pattern == UBCORE_PATTERN_1) { + *upi = 0; + return 0; + } + + if (pattern == UBCORE_PATTERN_3) { + if (ubcore_find_upi_with_dev_name(dev_name, upi) != 0) { + ubcore_log_err("can not find dev:%s\n", dev_name); + return -EINVAL; + } + + return 0; + } + + ubcore_log_err("Invalid pattern\n"); + return -EINVAL; +} + +static int ubcore_handle_update_eid(struct ubcore_device *mue_dev, + uint32_t ue_idx, enum ubcore_net_addr_op op, + struct ubcore_eid_update_info *eid_info) +{ + struct ubcore_ueid_cfg cfg = { 0 }; + + cfg.eid = eid_info->eid; + cfg.eid_index = eid_info->eid_idx; + + if (eid_info->upi_present) { + cfg.upi = eid_info->upi; + } else { + 
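+		/*
+		 * The request did not carry a UPI: fall back to
+		 * ubcore_get_upi(), which uses 0 for pattern 1 and looks the
+		 * UPI up by device name for pattern 3.
+		 */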
if (ubcore_get_upi(eid_info->dev_name, eid_info->pattern, + &cfg.upi) != 0) { + ubcore_log_err("Failed to upi, dev:%s, idx:%u", + eid_info->dev_name, eid_info->eid_idx); + return -ENXIO; + } + } + + if (op == UBCORE_ADD_NET_ADDR) + return ubcore_add_ueid(mue_dev, ue_idx, &cfg); + + if (op == UBCORE_DEL_NET_ADDR) + return ubcore_delete_ueid(mue_dev, ue_idx, &cfg); + + return -EINVAL; +} + +int ubcore_send_eid_update_req(struct ubcore_device *dev, + enum ubcore_net_addr_op op, + union ubcore_eid *eid, uint32_t eid_idx, + uint32_t *upi) +{ + uint32_t data_len = (uint32_t)sizeof(struct ubcore_update_eid_req); + struct ubcore_update_eid_req *msg_data; + struct ubcore_req *req; + int ret; + + req = kzalloc(sizeof(struct ubcore_req) + data_len, GFP_KERNEL); + if (req == NULL) + return -ENOMEM; + req->opcode = UBCORE_MSP_UPDATE_EID; + req->len = data_len; + + msg_data = (struct ubcore_update_eid_req *)req->data; + + msg_data->op = op; + msg_data->eid_info.eid = *eid; + msg_data->eid_info.pattern = dev->attr.pattern; + if (upi) { + msg_data->eid_info.upi_present = true; + msg_data->eid_info.upi = *upi; + } + + if (dev->attr.tp_maintainer) + ret = ubcore_handle_update_eid(dev, dev->attr.ue_idx, + msg_data->op, + &msg_data->eid_info); + else + ret = ubcore_send_req( + dev, + req); + kfree(req); + return ret; +} + +static void ubcore_handle_add_sip(struct ubcore_device *mue_dev, + struct ubcore_sip_info *sip, bool async) +{ + uint32_t index; + int ret; + + ret = ubcore_lookup_sip_idx(&mue_dev->sip_table, sip, &index); + if (ret == 0) { + ubcore_log_err("sip already exists\n"); + return; + } + + index = (uint32_t)ubcore_sip_idx_alloc(&mue_dev->sip_table); + if (mue_dev->ops != NULL && mue_dev->ops->add_net_addr != NULL && + mue_dev->ops->add_net_addr(mue_dev, &sip->addr, index) != 0) + ubcore_log_err("Failed to set net addr"); + + /* add net_addr entry, record idx -> netaddr mapping */ + (void)ubcore_add_sip_entry(&mue_dev->sip_table, sip, index); + + /* nodify uvs add sip info */ + if (ubcore_notify_uvs_update_sip_manage( + mue_dev, sip, NULL, UBCORE_SIP_ADD, index, async) != 0) + ubcore_log_err("notify uvs sip failed"); +} + +static void ubcore_handle_delete_sip(struct ubcore_device *mue_dev, + struct ubcore_sip_info *sip, bool async) +{ + uint32_t index; + + if (ubcore_lookup_sip_idx(&mue_dev->sip_table, sip, &index) != 0) + return; + + if (ubcore_del_net_addr(mue_dev, index) != 0) + ubcore_log_err("Failed to delete net addr"); + + (void)ubcore_del_sip_entry(&mue_dev->sip_table, index); + (void)ubcore_sip_idx_free(&mue_dev->sip_table, index); + /* nodify uvs delete sip info */ + if (ubcore_notify_uvs_update_sip_manage( + mue_dev, NULL, sip, UBCORE_SIP_DEL, index, async) != 0) + ubcore_log_err("notify uvs sip failed"); +} + +static void ubcore_hanlde_update_sip(struct ubcore_device *mue_dev, + struct ubcore_sip_info *sip, bool async) +{ + struct ubcore_sip_info old_sip = { 0 }; + uint32_t sip_idx; + int ret = 0; + + ret = ubcore_update_sip_entry(&mue_dev->sip_table, sip, &sip_idx, + &old_sip); + if (ret != 0) { + ubcore_log_err("Failed to update sip"); + return; + } + + if (ubcore_notify_uvs_update_sip_manage(mue_dev, sip, &old_sip, + UBCORE_SIP_UPDATE, sip_idx, + async) != 0) + ubcore_log_err("notify uvs sip failed"); +} + +static int ubcore_hanlde_update_net_addr(struct ubcore_device *mue_dev, + uint32_t ue_idx, + struct ubcore_update_net_addr_req *req, + bool async) +{ + if (ubcore_is_ub_device(mue_dev) && req->sip_present) { + if (req->op == UBCORE_ADD_NET_ADDR) + ubcore_handle_add_sip(mue_dev, 
&req->sip_info, async); + else if (req->op == UBCORE_DEL_NET_ADDR) + ubcore_handle_delete_sip(mue_dev, &req->sip_info, + async); + else if (req->op == UBCORE_UPDATE_NET_ADDR) + ubcore_hanlde_update_sip(mue_dev, &req->sip_info, + async); + } + + if (req->eid_present) + return ubcore_handle_update_eid(mue_dev, ue_idx, req->op, + &req->eid_info); + + return 0; +} + +static int +ubcore_send_net_addr_update_req(struct ubcore_device *dev, + struct ubcore_update_net_addr_req *add_req) +{ + uint32_t data_len = (uint32_t)sizeof(struct ubcore_update_net_addr_req); + struct ubcore_update_net_addr_req *msg_data; + struct ubcore_req *req; + int ret; + + req = kzalloc(sizeof(struct ubcore_req) + data_len, GFP_KERNEL); + if (req == NULL) + return -ENOMEM; + + req->opcode = UBCORE_MSG_UPDATE_NET_ADDR; + req->len = data_len; + + msg_data = (struct ubcore_update_net_addr_req *)req->data; + *msg_data = *add_req; + + ret = ubcore_send_req(dev, req); + kfree(req); + return ret; +} + +static int ubcore_update_eid_tbl(struct ubcore_device *dev, + struct ubcore_net_addr *netaddr, bool is_add, + struct net *net, uint32_t *eid_idx) +{ + union ubcore_eid *eid; + + if (dev->transport_type <= UBCORE_TRANSPORT_INVALID || + dev->transport_type >= UBCORE_TRANSPORT_MAX) + return -EINVAL; + + if (!dev->dynamic_eid) { + ubcore_log_err("static mode does not allow modify of eid\n"); + return -EINVAL; + } + + if (ubcore_check_ctrlplane(dev)) { + ubcore_log_warn( + "No need to update eid table for control plane.\n"); + return 0; + } + eid = (union ubcore_eid *)(void *)&netaddr->net_addr; + return ubcore_update_eidtbl_by_eid(dev, eid, eid_idx, is_add, net); +} + +static int ubcore_handle_inetaddr_event(struct net_device *netdev, + unsigned long event, + struct ubcore_net_addr *netaddr) +{ + struct ubcore_device **devices; + uint32_t num_devices = 0; + struct ubcore_device *dev; + + uint32_t i; + + if (netdev == NULL || + (netdev->reg_state >= NETREG_UNREGISTERING && event != NETDEV_DOWN)) + return NOTIFY_DONE; + + devices = ubcore_get_devices_from_netdev(netdev, &num_devices); + if (devices == NULL) + return NOTIFY_DONE; + + for (i = 0; i < num_devices; i++) { + dev = devices[i]; + if (dev->attr.virtualization) + continue; + + switch (event) { + case NETDEV_UP: + ubcore_update_net_addr(dev, netdev, netaddr, + UBCORE_ADD_NET_ADDR, true); + break; + case NETDEV_DOWN: + ubcore_update_net_addr(dev, netdev, netaddr, + UBCORE_DEL_NET_ADDR, true); + break; + default: + break; + } + } + ubcore_put_devices(devices, num_devices); + + return NOTIFY_OK; +} + +static int ubcore_ipv6_notifier_call(struct notifier_block *nb, + unsigned long event, void *arg) +{ + struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)arg; + struct ubcore_net_addr netaddr; + struct net_device *netdev; + + if (ifa == NULL || ifa->idev == NULL || ifa->idev->dev == NULL) + return NOTIFY_DONE; + + netdev = ifa->idev->dev; + ubcore_log_info( + "Get a ipv6 event %s from netdev %s%s ip %pI6c prefixlen %u\n", + netdev_cmd_to_name(event), netdev_name(netdev), + netdev_reg_state(netdev), &ifa->addr, ifa->prefix_len); + + memset(&netaddr, 0, sizeof(struct ubcore_net_addr)); + memcpy(&netaddr.net_addr, &ifa->addr, sizeof(struct in6_addr)); + (void)ubcore_fill_netaddr_macvlan(&netaddr, netdev, + UBCORE_NET_ADDR_TYPE_IPV6); + netaddr.prefix_len = ifa->prefix_len; + + if (netaddr.net_addr.in6.subnet_prefix == + cpu_to_be64(UBCORE_LOCAL_SHUNET)) + /* When mtu changes, intercept the ipv6 address up/down that triggers fe80 */ + return NOTIFY_DONE; + return 
ubcore_handle_inetaddr_event(netdev, event, &netaddr); +} + +static int ubcore_ipv4_notifier_call(struct notifier_block *nb, + unsigned long event, void *arg) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)arg; + struct ubcore_net_addr netaddr; + struct net_device *netdev; + + if (ifa == NULL || ifa->ifa_dev == NULL || ifa->ifa_dev->dev == NULL) + return NOTIFY_DONE; + + netdev = ifa->ifa_dev->dev; + ubcore_log_info("Get a ipv4 event %s netdev %s%s ip %pI4 prefixlen %u", + netdev_cmd_to_name(event), netdev_name(netdev), + netdev_reg_state(netdev), &ifa->ifa_address, + ifa->ifa_prefixlen); + + memset(&netaddr, 0, sizeof(struct ubcore_net_addr)); + ubcore_ipv4_to_netaddr(&netaddr, ifa->ifa_address); + (void)ubcore_fill_netaddr_macvlan(&netaddr, netdev, + UBCORE_NET_ADDR_TYPE_IPV4); + netaddr.prefix_len = ifa->ifa_prefixlen; + return ubcore_handle_inetaddr_event(netdev, event, &netaddr); +} + +static void ubcore_add_ipv4_entry(struct list_head *list, __be32 ipv4, + uint32_t prefix_len, + struct net_device *netdev) +{ + struct ubcore_net_addr_node *na_entry; + + na_entry = kzalloc(sizeof(struct ubcore_net_addr_node), GFP_ATOMIC); + if (na_entry == NULL) + return; + + ubcore_ipv4_to_netaddr(&na_entry->addr, ipv4); + (void)ubcore_fill_netaddr_macvlan(&na_entry->addr, netdev, + UBCORE_NET_ADDR_TYPE_IPV4); + na_entry->addr.prefix_len = prefix_len; + list_add_tail(&na_entry->node, list); +} + +static void ubcore_add_ipv6_entry(struct list_head *list, struct in6_addr *ipv6, + uint32_t prefix_len, + struct net_device *netdev) +{ + struct ubcore_net_addr_node *na_entry; + + na_entry = kzalloc(sizeof(struct ubcore_net_addr_node), GFP_ATOMIC); + if (na_entry == NULL) + return; + + memcpy(&na_entry->addr.net_addr, ipv6, sizeof(struct in6_addr)); + (void)ubcore_fill_netaddr_macvlan(&na_entry->addr, netdev, + UBCORE_NET_ADDR_TYPE_IPV6); + na_entry->addr.prefix_len = prefix_len; + list_add_tail(&na_entry->node, list); +} + +static void ubcore_netdev_get_ipv4(struct net_device *netdev, + struct list_head *list) +{ + struct in_ifaddr *ifa; + struct in_device *in_dev; + + rcu_read_lock(); + in_dev = __in_dev_get_rcu(netdev); + if (in_dev == NULL) { + rcu_read_unlock(); + return; + } + + in_dev_for_each_ifa_rcu(ifa, in_dev) { + ubcore_add_ipv4_entry(list, ifa->ifa_address, + ifa->ifa_prefixlen, netdev); + } + rcu_read_unlock(); +} + +static void ubcore_netdev_get_ipv6(struct net_device *netdev, + struct list_head *list) +{ + struct inet6_ifaddr *ifa; + struct inet6_dev *in_dev; + + in_dev = in6_dev_get(netdev); + if (in_dev == NULL) + return; + + read_lock_bh(&in_dev->lock); + list_for_each_entry(ifa, &in_dev->addr_list, if_list) { + ubcore_add_ipv6_entry(list, (struct in6_addr *)&ifa->addr, + ifa->prefix_len, netdev); + } + read_unlock_bh(&in_dev->lock); + in6_dev_put(in_dev); +} + +int ubcore_update_net_addr(struct ubcore_device *dev, struct net_device *netdev, + struct ubcore_net_addr *netaddr, + enum ubcore_net_addr_op op, bool async) +{ + struct ubcore_update_net_addr_req req = { 0 }; + uint32_t eid_idx = 0; + int ret = 0; + + if (!netdev) + return -EINVAL; + req.op = op; + if (ubcore_is_ub_device(dev) && dev->dynamic_eid) { + ubcore_sip_init(&req.sip_info, dev, netaddr, netdev); + req.sip_present = true; + } + + // Add eid table entry + if ((op == UBCORE_ADD_NET_ADDR || op == UBCORE_DEL_NET_ADDR)) { + ret = ubcore_update_eid_tbl(dev, netaddr, + op == UBCORE_ADD_NET_ADDR, + dev_net(netdev), &eid_idx); + if (ret == 0 && dev->dynamic_eid) { + req.eid_present = true; + req.eid_info.eid_idx = eid_idx; + 
req.eid_info.pattern = dev->attr.pattern; + + memcpy(req.eid_info.eid.raw, netaddr->net_addr.raw, + UBCORE_NET_ADDR_BYTES); + memcpy(req.eid_info.dev_name, dev->dev_name, + UBCORE_MAX_DEV_NAME); + } + } + + // If dev is not MUE, send msg to MUE + if (dev->attr.tp_maintainer) + ret = ubcore_hanlde_update_net_addr(dev, dev->attr.ue_idx, &req, + async); + else + ret = ubcore_send_net_addr_update_req( + dev, &req); + + // Delete eid entry if failed + if (ret != 0 && op == UBCORE_ADD_NET_ADDR && req.eid_present) + (void)ubcore_update_eid_tbl(dev, netaddr, false, + dev_net(netdev), &eid_idx); + + return ret; +} + +void ubcore_update_netdev_addr(struct ubcore_device *dev, + struct net_device *netdev, + enum ubcore_net_addr_op op, bool async) +{ + struct ubcore_net_addr_node *na_entry; + struct ubcore_net_addr_node *next; + LIST_HEAD(na_list); + + if (netdev == NULL) + return; + + /* In virtualization situation sip and eid are not from net_dev */ + if (dev->attr.virtualization) + return; + + ubcore_netdev_get_ipv4(netdev, &na_list); + ubcore_netdev_get_ipv6(netdev, &na_list); + list_for_each_entry_safe(na_entry, next, &na_list, node) { + if (na_entry->addr.net_addr.in6.subnet_prefix == + cpu_to_be64(UBCORE_LOCAL_SHUNET)) + continue; + ubcore_update_net_addr(dev, netdev, &na_entry->addr, op, async); + list_del(&na_entry->node); + kfree(na_entry); + } +} + +void ubcore_update_all_vlan_netaddr(struct ubcore_device *dev, + enum ubcore_net_addr_op op) +{ + struct net_device *net_dev; + int i; + + if (!dev->netdev) + return; + + for (i = 0; i < VLAN_N_VID; i++) { + net_dev = __vlan_find_dev_deep_rcu(dev->netdev, + htons(ETH_P_8021Q), i); + if (!net_dev) + continue; + + ubcore_update_netdev_addr(dev, net_dev, op, false); + ubcore_log_info("update dev:%s, in vlan id:%u", dev->dev_name, + i); + } +} + +static void ubcore_change_mtu(struct ubcore_device *dev, + struct net_device *netdev) +{ + ubcore_update_netdev_addr(dev, netdev, UBCORE_UPDATE_NET_ADDR, true); +} + +static int +ubcore_netdev_event_change_upper(struct ubcore_device *dev, + struct net_device *slave, + struct netdev_notifier_changeupper_info *info) +{ + struct netdev_lag_upper_info *lag_upper_info = NULL; + struct net_device *bond = info->upper_dev; + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->bond_add == NULL || + dev->ops->bond_remove == NULL || info == NULL || bond == NULL || + slave == NULL) { + ubcore_log_err("Invalid parameter!\n"); + ubcore_put_device(dev); + return -EINVAL; + } + + if (info->linking && info->upper_info == NULL) { + ubcore_log_info("upper info is NULL\n"); + ubcore_put_device(dev); + return -EINVAL; + } + + ubcore_log_info("Event with master netdev %s and slave netdev %s", + netdev_name(bond), netdev_name(slave)); + + /* dev may be unregistered so it has to be put_device here */ + ubcore_put_device(dev); + + if (info->linking) { + lag_upper_info = info->upper_info; + ret = dev->ops->bond_add(bond, slave, lag_upper_info); + if (ret != 0) { + ubcore_log_err("Failed to bond_add and ret value is %d", + ret); + return -EIO; + } + } else { + ret = dev->ops->bond_remove(bond, slave); + if (ret != 0) { + ubcore_log_err( + "Failed to bond_remove and ret value is %d", + ret); + return -EIO; + } + } + + ubcore_log_info("Success to deal with event NETDEV_CHANGEUPPER"); + return 0; +} + +static int ubcore_netdev_event_change_lower_state( + struct ubcore_device *dev, struct net_device *slave, + struct netdev_notifier_changelowerstate_info *info) +{ + struct netdev_lag_lower_state_info *lag_lower_info = NULL; + 
struct net_device *bond = NULL; + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->slave_update == NULL || + slave == NULL) { + ubcore_log_err("Invalid parameter!\n"); + return -EINVAL; + } + if (info == NULL || info->lower_state_info == NULL) { + ubcore_log_info("lower state info is NULL\n"); + return -EINVAL; + } + + bond = netdev_master_upper_dev_get_rcu(slave); + if (bond) { + ubcore_log_info( + "Event with master netdev %s and slave netdev %s", + netdev_name(bond), netdev_name(slave)); + } else { + ubcore_log_info( + "Event with master netdev NULL and slave netdev %s", + netdev_name(slave)); + } + lag_lower_info = info->lower_state_info; + ret = dev->ops->slave_update(bond, slave, lag_lower_info); + if (ret != 0) { + ubcore_log_err("Failed to slave_update and ret value is %d", + ret); + return -EIO; + } + ubcore_log_info("Success to deal with event NETDEV_CHANGELOWERSTATE"); + return 0; +} + +static struct net_device * +ubcore_find_master_netdev(unsigned long event, + struct netdev_notifier_changeupper_info *info, + struct net_device *slave) +{ + /* When we need to remove slaves from the bond device, + * we cannot find the ubcore dev by the netdev provided by unlink NETDEV_CHANGEUPPER. + * It has been unregistered. We need to find ubcore dev by the master netdev + */ + struct net_device *bond = NULL; + + if (event == NETDEV_CHANGEUPPER && !info->linking) + bond = info->upper_dev; + else if (event == NETDEV_CHANGELOWERSTATE) + bond = netdev_master_upper_dev_get_rcu(slave); + + return bond; +} + +static void ubcore_do_netdev_notify(unsigned long event, + struct ubcore_device *dev, + struct net_device *netdev, void *arg) +{ + switch (event) { + case NETDEV_REGISTER: + case NETDEV_UP: + case NETDEV_UNREGISTER: + case NETDEV_DOWN: + case NETDEV_CHANGEADDR: + break; + case NETDEV_CHANGEMTU: + if (dev->transport_type == UBCORE_TRANSPORT_UB) + ubcore_change_mtu(dev, netdev); + break; + default: + break; + } +} + +static void ubcore_do_netdev_bond_notify(unsigned long event, + struct ubcore_device *dev, + struct net_device *netdev, void *arg) +{ + switch (event) { + case NETDEV_CHANGEUPPER: + /* NETDEV_CHANGEUPPER event need to put_device ahead due to unregister dev */ + if (dev->transport_type == UBCORE_TRANSPORT_UB) + (void)ubcore_netdev_event_change_upper(dev, netdev, + arg); + else + ubcore_put_device(dev); + break; + case NETDEV_CHANGELOWERSTATE: + if (dev->transport_type == UBCORE_TRANSPORT_UB) + (void)ubcore_netdev_event_change_lower_state( + dev, netdev, arg); + break; + default: + break; + } +} + +static int ubcore_net_notifier_call(struct notifier_block *nb, + unsigned long event, void *arg) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(arg); + struct ubcore_device **devices; + struct net_device *master_netdev; + uint32_t num_devices = 0; + uint32_t i; + + if (netdev == NULL) + return NOTIFY_DONE; + + ubcore_log_info("Get a net event %s from ubcore_dev %s%s", + netdev_cmd_to_name(event), netdev_name(netdev), + netdev_reg_state(netdev)); + + if (event == NETDEV_CHANGEUPPER || event == NETDEV_CHANGELOWERSTATE) { + mutex_lock(&g_ubcore_net_event_lock); + devices = ubcore_get_devices_from_netdev(netdev, &num_devices); + if (devices == NULL) { + master_netdev = + ubcore_find_master_netdev(event, arg, netdev); + if (master_netdev == NULL) { + ubcore_log_warn( + "Can not find master netdev by slave netdev %s", + netdev_name(netdev)); + mutex_unlock(&g_ubcore_net_event_lock); + return NOTIFY_DONE; + } + ubcore_log_info("Success to find master netdev %s", + 
netdev_name(master_netdev)); + devices = ubcore_get_devices_from_netdev(master_netdev, + &num_devices); + if (devices == NULL) { + ubcore_log_warn( + "Can not find devices from master netdev %s", + netdev_name(master_netdev)); + mutex_unlock(&g_ubcore_net_event_lock); + return NOTIFY_DONE; + } + } + + for (i = 0; i < num_devices; i++) + ubcore_do_netdev_bond_notify(event, devices[i], netdev, + arg); + mutex_unlock(&g_ubcore_net_event_lock); + } else { + devices = ubcore_get_devices_from_netdev(netdev, &num_devices); + if (devices == NULL) + return NOTIFY_DONE; + + for (i = 0; i < num_devices; i++) + ubcore_do_netdev_notify(event, devices[i], netdev, arg); + } + + if (event != NETDEV_CHANGEUPPER) + ubcore_put_devices(devices, num_devices); + else + kfree(devices); + + return NOTIFY_OK; +} + +static struct notifier_block ubcore_ipv6_notifier = { + .notifier_call = ubcore_ipv6_notifier_call, +}; + +static struct notifier_block ubcore_ipv4_notifier = { + .notifier_call = ubcore_ipv4_notifier_call, +}; + +static struct notifier_block ubcore_net_notifier = { + .notifier_call = ubcore_net_notifier_call, +}; + +int ubcore_register_notifiers(void) +{ + int ret; + + ret = register_netdevice_notifier(&ubcore_net_notifier); + if (ret != 0) { + pr_err("Failed to register netdev notifier, ret = %d\n", ret); + return ret; + } + ret = register_inetaddr_notifier(&ubcore_ipv4_notifier); + if (ret != 0) { + (void)unregister_netdevice_notifier(&ubcore_net_notifier); + pr_err("Failed to register inetaddr notifier, ret = %d\n", ret); + return -1; + } + ret = register_inet6addr_notifier(&ubcore_ipv6_notifier); + if (ret != 0) { + (void)unregister_inetaddr_notifier(&ubcore_ipv4_notifier); + (void)unregister_netdevice_notifier(&ubcore_net_notifier); + pr_err("Failed to register inet6addr notifier, ret = %d\n", + ret); + return -1; + } + return 0; +} + +void ubcore_unregister_notifiers(void) +{ + (void)unregister_inet6addr_notifier(&ubcore_ipv6_notifier); + (void)unregister_inetaddr_notifier(&ubcore_ipv4_notifier); + (void)unregister_netdevice_notifier(&ubcore_net_notifier); +} + +static int __init ubcore_init(void) +{ + int ret; + + if (ubcore_net_comm_init() != 0) { + ubcore_log_err("Failed init connect alpha"); + return -1; + } + ubcore_exchange_init(); + ubcore_connect_bonding_init(); + + ubcore_ue2uvs_tables_init(); + ret = ubcore_class_register(); + if (ret != 0) + return ret; + + ret = ubcore_cdev_register(); + if (ret != 0) + goto class_init; + + /* netlink_register_notifier should be executed after initializing uvs list */ + ubcore_uvs_list_init(); + ubcore_ue2uvs_tables_init(); + + ret = ubcore_genl_init(); + if (ret != 0) { + (void)pr_err("Failed to ubcore genl init\n"); + goto genl_init; + } + + ret = ubcore_register_pnet_ops(); + if (ret != 0) + goto reg_pnet; + + ret = ubcore_create_workqueues(); + if (ret != 0) { + pr_err("Failed to create all the workqueues, ret = %d\n", ret); + goto create_wq; + } + + ret = ubcm_init(); + if (ret != 0) { + pr_err("Failed to init ubcm, ret: %d.\n", ret); + goto ubcm; + } + + ubcore_log_info("ubcore module init success.\n"); + return 0; + +ubcm: + ubcore_destroy_workqueues(); +create_wq: + ubcore_unregister_pnet_ops(); +reg_pnet: + ubcore_genl_exit(); +genl_init: + ubcore_ue2uvs_tables_uninit(); + ubcore_uvs_list_uninit(); + ubcore_cdev_unregister(); +class_init: + ubcore_class_unregister(); + return ret; +} + +static void __exit ubcore_exit(void) +{ + ubcm_uninit(); + ubcore_destroy_workqueues(); + ubcore_unregister_pnet_ops(); + ubcore_genl_exit(); + 
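+ /* Remaining teardown releases the uvs list, char device, device class, ue2uvs tables and net communication set up in ubcore_init() */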
ubcore_uvs_list_uninit(); + ubcore_cdev_unregister(); + ubcore_class_unregister(); + ubcore_ue2uvs_tables_uninit(); + ubcore_net_comm_uninit(); + ubcore_log_info("ubcore module exits.\n"); +} + +module_init(ubcore_init); +module_exit(ubcore_exit); + +MODULE_DESCRIPTION("Kernel module for ubus"); +MODULE_AUTHOR("huawei"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ub/urma/ubcore/ubcore_main.h b/drivers/ub/urma/ubcore/ubcore_main.h new file mode 100644 index 000000000000..55b1d7c6bb2a --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_main.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore main header + * Author: Zhao Yusu + * Create: 2024-02-27 + * Note: + * History: 2024-02-27: Introduce ubcore version API + */ + +#ifndef UBCORE_MAIN_H +#define UBCORE_MAIN_H + +#include "ubcore_msg.h" + +#define UBCORE_VERSION0 0x0 +#define UBCORE_VERSION UBCORE_VERSION0 +#define UBCORE_INVALID_VERSION 0xffffffff +#define UBCORE_SUPPORT_VERION_NUM 1 +#define UBCORE_CAP 0x0 + +bool ubcore_negotiated(void); +uint32_t ubcore_get_version(void); +void ubcore_set_version(uint32_t version); +uint32_t ubcore_get_cap(void); +void ubcore_set_cap(uint32_t cap); +uint32_t *ubcore_get_support_versions(void); +int ubcore_negotiate_version(struct ubcore_msg_nego_ver_req *req, uint32_t *ver, + uint32_t *cap); +int ubcore_recv_net_addr_update(struct ubcore_device *mue_dev, + struct ubcore_req_host *req); +int ubcore_send_eid_update_req(struct ubcore_device *dev, + enum ubcore_net_addr_op op, + union ubcore_eid *eid, uint32_t eid_idx, + uint32_t *upi); +int ubcore_recv_eid_update_req(struct ubcore_device *mue_dev, + struct ubcore_req_host *req); + +int ubcore_update_net_addr(struct ubcore_device *dev, struct net_device *netdev, + struct ubcore_net_addr *netaddr, + enum ubcore_net_addr_op op, bool async); + +int ubcore_register_notifiers(void); +void ubcore_unregister_notifiers(void); + +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_msg.c b/drivers/ub/urma/ubcore/ubcore_msg.c new file mode 100644 index 000000000000..52e388f2c40d --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_msg.c @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore message table implementation + * Author: Yang Yijian + * Create: 2023-07-05 + * Note: + * History: 2023-07-05: Create file + */ + +#include +#include +#include +#include +#include "ubcore_log.h" +#include +#include "ubcore_netlink.h" +#include "ubcore_vtp.h" +#include +#include "ubcore_priv.h" +#include "ubcore_workqueue.h" +#include "ubcore_main.h" +#include "ubcore_uvs.h" +#include "ubcore_device.h" +#include "ubcore_msg.h" + +#define MS_PER_SEC 1000 +static LIST_HEAD(g_msg_session_list); +static DEFINE_SPINLOCK(g_msg_session_lock); +static atomic_t g_msg_seq = ATOMIC_INIT(0); + +static uint32_t ubcore_get_msg_seq(void) +{ + return (uint32_t)atomic_inc_return(&g_msg_seq); +} + +void ubcore_free_msg_session(struct kref *kref) +{ + struct ubcore_msg_session *s = + container_of(kref, struct ubcore_msg_session, kref); + unsigned long flags; + + spin_lock_irqsave(&g_msg_session_lock, flags); + list_del(&s->node); + spin_unlock_irqrestore(&g_msg_session_lock, flags); + + mutex_destroy(&s->session_lock); + kfree(s); +} + +struct ubcore_msg_session *ubcore_find_msg_session(uint32_t seq) +{ + struct ubcore_msg_session *tmp, *target = NULL; + unsigned long flags; + + spin_lock_irqsave(&g_msg_session_lock, flags); + list_for_each_entry(tmp, &g_msg_session_list, node) { + if (tmp->req == NULL) + continue; + if (tmp->req->msg_id == seq) { + target = tmp; + kref_get(&target->kref); + break; + } + } + spin_unlock_irqrestore(&g_msg_session_lock, flags); + return target; +} + +void ubcore_destroy_msg_session(struct ubcore_msg_session *s) +{ + (void)kref_put(&s->kref, ubcore_free_msg_session); +} + +static struct ubcore_msg_session * +ubcore_create_msg_session(struct ubcore_req *req) +{ + struct ubcore_msg_session *s; + unsigned long flags; + + s = kzalloc(sizeof(struct ubcore_msg_session), GFP_KERNEL); + if (s == NULL) + return NULL; + + s->req = req; + s->resp = NULL; + s->is_async = false; + s->vtpn = NULL; + s->msg_id = req->msg_id; + mutex_init(&s->session_lock); + s->session_state = UBCORE_SESSION_INIT; + kref_init(&s->kref); + init_completion(&s->comp); + spin_lock_irqsave(&g_msg_session_lock, flags); + kref_get(&s->kref); + list_add_tail(&s->node, &g_msg_session_list); + spin_unlock_irqrestore(&g_msg_session_lock, flags); + return s; +} + +bool ubcore_set_session_finish(struct ubcore_msg_session *s) +{ + if (s->session_state == UBCORE_SESSION_FINISH) + return false; + + if (!mutex_trylock(&s->session_lock)) + return false; + + if (s->session_state == UBCORE_SESSION_FINISH) { + mutex_unlock(&s->session_lock); + return false; + } + s->session_state = UBCORE_SESSION_FINISH; + mutex_unlock(&s->session_lock); + return true; +} + +int ubcore_send_req(struct ubcore_device *dev, struct ubcore_req *req) +{ + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->send_req == NULL || + req->len > UBCORE_MAX_MSG) { + ubcore_log_err("Invalid parameter!\n"); + return -EINVAL; + } + + ret = dev->ops->send_req(dev, req); + if (ret != 0) { + ubcore_log_err("Failed to send message! msg_id = %u!\n", + req->msg_id); + return ret; + } + return 0; +} + +int ubcore_send_resp(struct ubcore_device *dev, + struct ubcore_resp_host *resp_host) +{ + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->send_resp == NULL || + resp_host == NULL || resp_host->resp.len > UBCORE_MAX_MSG) { + ubcore_log_err("Invalid parameter!\n"); + return -EINVAL; + } + + ret = dev->ops->send_resp(dev, resp_host); + if (ret != 0) { + ubcore_log_err("Failed to send message! 
msg_id = %u!\n", + resp_host->resp.msg_id); + return ret; + } + return 0; +} + +struct ubcore_msg_session * +ubcore_create_ue2mue_session(struct ubcore_req *req, struct ubcore_vtpn *vtpn) +{ + struct ubcore_msg_session *s; + + req->msg_id = ubcore_get_msg_seq(); + s = ubcore_create_msg_session(req); + if (s == NULL) { + ubcore_log_err("Failed to create req session!\n"); + return NULL; + } + s->is_async = true; + s->vtpn = vtpn; + return s; +} + +static int ubcore_msg_discover_eid_cb(struct ubcore_device *dev, + struct ubcore_resp *resp, void *msg_ctx) +{ + struct ubcore_msg_discover_eid_resp *data; + struct net *net = (struct net *)msg_ctx; + bool is_alloc_eid; + + if (dev == NULL || resp == NULL || + resp->len < sizeof(struct ubcore_msg_discover_eid_resp)) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + data = (struct ubcore_msg_discover_eid_resp *)(void *)resp->data; + if (data == NULL || data->ret != 0 || + (resp->opcode != UBCORE_MSG_ALLOC_EID && + resp->opcode != UBCORE_MSG_DEALLOC_EID)) { + ubcore_log_err( + "Failed to query data from the UVS. Use the default value.\n"); + return -EINVAL; + } + + is_alloc_eid = (resp->opcode == UBCORE_MSG_ALLOC_EID); + if (ubcore_update_eidtbl_by_idx(dev, &data->eid, data->eid_index, + is_alloc_eid, net) != 0) + return -1; + + return 0; +} + +/** + * If you do not need to wait for the response of a message, use ubcore_asyn_send_ue2mue_msg. + */ +struct ubcore_msg_session * +ubcore_asyn_send_ue2mue_msg(struct ubcore_device *dev, struct ubcore_req *req) +{ + struct ubcore_msg_session *s; + int ret; + + req->msg_id = ubcore_get_msg_seq(); + s = ubcore_create_msg_session(req); + if (s == NULL) { + ubcore_log_err("Failed to create req session!\n"); + return NULL; + } + + ret = ubcore_send_req(dev, req); + (void)kref_put(&s->kref, ubcore_free_msg_session); + if (ret != 0) { + ubcore_log_err( + "Failed to send req, msg_id = %u, opcode = %u.\n", + req->msg_id, (uint16_t)req->opcode); + ubcore_destroy_msg_session(s); + return NULL; + } + return s; +} + +int ubcore_msg_discover_eid(struct ubcore_device *dev, uint32_t eid_index, + enum ubcore_msg_opcode op, struct net *net, + struct ubcore_update_eid_ctx *ctx) +{ + struct ubcore_msg_discover_eid_req *data; + struct ubcore_msg_session *s; + struct ubcore_req *req_msg; + uint32_t data_len; + + ctx->cb.callback = ubcore_msg_discover_eid_cb; + ctx->cb.user_arg = net; + data_len = sizeof(struct ubcore_msg_discover_eid_req); + req_msg = kcalloc(1, sizeof(struct ubcore_req) + data_len, GFP_KERNEL); + if (req_msg == NULL) + return -ENOMEM; + + req_msg->len = data_len; + req_msg->opcode = op; + data = (struct ubcore_msg_discover_eid_req *)req_msg->data; + data->eid_index = eid_index; + data->eid_type = dev->attr.pattern; + data->virtualization = dev->attr.virtualization; + memcpy(data->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME); + + s = ubcore_asyn_send_ue2mue_msg(dev, req_msg); + if (s == NULL) { + ubcore_log_err("send ue2mue failed.\n"); + kfree(req_msg); + return -1; + } + ctx->req_msg = req_msg; + ctx->s = s; + return 0; +} + +/** + * if the operation times out or is successful, 0 is returned and reply done to urma_admin. + * if the operation is waiting for the result, 1 is returned and reply dump to urma_admin. 
+ */ +int ubcore_update_uvs_eid_ret(struct ubcore_update_eid_ctx *ctx) +{ + long start_ts = ctx->start_ts; + long leave_time = 0; + struct timespec64 tv; + bool is_done; + + is_done = try_wait_for_completion(&ctx->s->comp); + if (is_done == false) { + ktime_get_ts64(&tv); + leave_time = tv.tv_sec - start_ts; + if (leave_time * MS_PER_SEC < UBCORE_TYPICAL_TIMEOUT) + return 1; + + ubcore_log_err( + "waiting req reply timeout, msg_id = %u, opcode = %u, leavetime = %ld.\n", + ctx->req_msg->msg_id, (uint16_t)ctx->req_msg->opcode, + leave_time); + return -EAGAIN; + } + + ubcore_log_info("waiting req reply success, msg_id = %u, opcode = %u\n", + ctx->req_msg->msg_id, (uint16_t)ctx->req_msg->opcode); + + if (ctx->cb.callback(ctx->dev, ctx->s->resp, ctx->cb.user_arg) != 0) + return -EINVAL; + + return 0; +} + +int ubcore_recv_req(struct ubcore_device *dev, struct ubcore_req_host *req) +{ + return 0; +} +EXPORT_SYMBOL(ubcore_recv_req); + +int ubcore_recv_resp(struct ubcore_device *dev, struct ubcore_resp *resp) +{ + return 0; +} +EXPORT_SYMBOL(ubcore_recv_resp); diff --git a/drivers/ub/urma/ubcore/ubcore_msg.h b/drivers/ub/urma/ubcore/ubcore_msg.h new file mode 100644 index 000000000000..2dc2c9761804 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_msg.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore msg table header + * Author: Yang Yijian + * Create: 2023-07-05 + * Note: + * History: 2023-07-05: Create file + */ + +#ifndef UBCORE_MSG_H +#define UBCORE_MSG_H + +#include + +// Must be the same as TPSA_NL_RESP_XXX +#define UBCORE_MSG_RESP_LIMIT_RATE (-EBUSY) +#define UBCORE_MSG_RESP_RC_JETTY_ALREADY_BIND (-EEXIST) +#define UBCORE_MSG_RESP_IN_PROGRESS (-EINPROGRESS) +#define UBCORE_MSG_RESP_FAIL (-EPERM) +#define UBCORE_MSG_RESP_SUCCESS 0 + +typedef int (*ubcore_req_handler)(struct ubcore_device *dev, + struct ubcore_req_host *req); +typedef int (*ubcore_resp_handler)(struct ubcore_device *dev, + struct ubcore_resp *msg, void *msg_ctx); + +struct ubcore_resp_cb { + void *user_arg; + ubcore_resp_handler callback; +}; + +enum ubcore_session_type { UBCORE_SESSION_INIT = 0, UBCORE_SESSION_FINISH }; + +struct ubcore_msg_session { + struct list_head node; + uint32_t msg_id; + bool is_async; + struct ubcore_vtpn *vtpn; + struct mutex session_lock; + enum ubcore_session_type session_state; + struct ubcore_req *req; + struct ubcore_resp *resp; + struct kref kref; + struct completion + comp; /* Synchronization event of timeout sleep and thread wakeup */ + struct ubcore_resp_cb cb; +}; + +struct ubcore_msg_config_device_req { + char dev_name[UBCORE_MAX_DEV_NAME]; + uint32_t max_rc_cnt; + uint32_t max_rc_depth; + uint32_t min_slice; /* TA slice size byte */ + uint32_t max_slice; /* TA slice size byte */ + bool is_mue_dev; + bool virtualization; + char muedev_name[UBCORE_MAX_DEV_NAME]; +}; + +struct ubcore_msg_config_device_resp { + int ret; + uint32_t rc_cnt; + uint32_t rc_depth; + uint32_t slice; /* TA slice size byte */ + uint32_t set_slice; + bool is_mue_dev; + uint32_t suspend_period; + uint32_t suspend_cnt; +}; + +struct ubcore_msg_discover_eid_req { + uint32_t eid_index; + char dev_name[UBCORE_MAX_DEV_NAME]; + enum ubcore_pattern eid_type; + bool virtualization; + char muedev_name[UBCORE_MAX_DEV_NAME]; +}; + +struct ubcore_msg_discover_eid_resp { + uint32_t ret; + union ubcore_eid eid; + uint32_t eid_index; + uint32_t upi; + uint16_t ue_idx; +}; + +struct ubcore_msg_nego_ver_req { + uint32_t cap; + uint32_t version_num; + uint32_t versions[]; +}; + +struct ubcore_msg_nego_ver_resp { + int ret; + uint32_t cap; + uint32_t version; +}; + +struct ubcore_function_mig_req { + uint16_t mig_ue_idx; +}; + +struct ubcore_function_mig_resp { + uint16_t mig_ue_idx; + enum ubcore_mig_resp_status status; +}; + +struct ubcore_eid_update_info { + uint32_t pattern; + uint32_t eid_idx; + union ubcore_eid eid; + char dev_name[UBCORE_MAX_DEV_NAME]; + bool upi_present; + uint32_t upi; +}; + +struct ubcore_update_net_addr_req { + enum ubcore_net_addr_op op; + bool sip_present; + struct ubcore_sip_info sip_info; + bool eid_present; + struct ubcore_eid_update_info eid_info; +}; + +struct ubcore_update_eid_req { + enum ubcore_net_addr_op op; + struct ubcore_eid_update_info eid_info; +}; + +struct ubcore_update_eid_ctx { + struct ubcore_device *dev; + struct ubcore_req *req_msg; + struct ubcore_msg_session *s; + struct net *net; + struct ubcore_resp_cb cb; + long start_ts; +}; + +// MUE notify MUE/UE to update eid +struct ubcore_update_eid_tbl_notify { + bool is_alloc_eid; + union ubcore_eid eid; + uint32_t eid_idx; +}; + +int ubcore_send_req(struct ubcore_device *dev, struct ubcore_req *req); +int ubcore_send_resp(struct ubcore_device *dev, + struct ubcore_resp_host *resp_host); +/* caller should free memory of req after return */ +struct ubcore_msg_session * 
+ubcore_create_ue2mue_session(struct ubcore_req *req, struct ubcore_vtpn *vtpn); +int ubcore_msg_discover_eid(struct ubcore_device *dev, uint32_t eid_index, + enum ubcore_msg_opcode op, struct net *net, + struct ubcore_update_eid_ctx *ctx); +void ubcore_free_msg_session(struct kref *kref); +void ubcore_destroy_msg_session(struct ubcore_msg_session *s); +int ubcore_update_uvs_eid_ret(struct ubcore_update_eid_ctx *ctx); +struct ubcore_msg_session *ubcore_find_msg_session(uint32_t seq); +bool ubcore_set_session_finish(struct ubcore_msg_session *s); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_netdev.c b/drivers/ub/urma/ubcore/ubcore_netdev.c new file mode 100644 index 000000000000..4391eca25862 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_netdev.c @@ -0,0 +1,709 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore netdev module + * Author: Chen Wen + * Create: 2023-08-27 + * Note: + * History: 2023-08-27: create file + */ +#include +#include +#include + +#include +#include "ubcore_log.h" +#include +#include "ubcore_netlink.h" +#include "ubcore_uvs.h" +#include "ubcore_priv.h" + +static DECLARE_RWSEM(g_port_list_lock); + +struct ubcore_ndev_port { + struct net_device *ndev; + uint8_t port_list[UBCORE_MAX_PORT_CNT]; + bool valid_list[UBCORE_MAX_PORT_CNT]; + uint8_t port_cnt; + struct list_head node; + char dev_name[UBCORE_MAX_DEV_NAME]; +}; + +struct ubcore_sip_info * +ubcore_lookup_sip_info_without_lock(struct ubcore_sip_table *sip_table, + uint32_t idx) +{ + struct ubcore_sip_info *sip = NULL; + + if (idx >= sip_table->max_sip_cnt || + !sip_table->entry[idx].sip_info.is_active) { + ubcore_log_err("sip node does not exist"); + return NULL; + } + sip = &sip_table->entry[idx].sip_info; + return sip; +} + +static struct ubcore_nlmsg * +ubcore_alloc_sip_req(enum ubcore_cmd msg_type, + enum ubcore_transport_type transport_type, + uint32_t payload_len, struct ubcore_sip_info *sip_info) +{ + struct ubcore_nlmsg *req_msg; + + req_msg = + kzalloc(sizeof(struct ubcore_nlmsg) + payload_len, GFP_KERNEL); + if (req_msg == NULL) + return NULL; + + req_msg->msg_type = msg_type; + req_msg->transport_type = transport_type; + (void)memcpy(req_msg->dst_eid.raw, sip_info->addr.net_addr.raw, + UBCORE_EID_SIZE); + (void)memcpy(req_msg->src_eid.raw, sip_info->addr.net_addr.raw, + UBCORE_EID_SIZE); + req_msg->payload_len = payload_len; + + return req_msg; +} + +int ubcore_notify_uvs_del_sip(struct ubcore_device *dev, + struct ubcore_sip_info *sip_info, uint32_t index) +{ + struct ubcore_uvs_instance **uvs_list = NULL; + struct ubcore_nlmsg *req_msg, *resp_msg; + struct ubcore_del_sip_req *sip_req; + struct ubcore_del_sip_resp *resp; + int success_count = 0; + int count = 0; + int i; + + ubcore_log_info("notify all uvs to del sip: mue %s, sip_idx %u\n", + sip_info->dev_name, index); + + req_msg = ubcore_alloc_sip_req(UBCORE_CMD_DEL_SIP_REQ, + dev->transport_type, + sizeof(struct ubcore_del_sip_req), + sip_info); + if (req_msg == NULL) + return -ENOMEM; + + 
sip_req = (struct ubcore_del_sip_req *)(void *)req_msg->payload; + sip_req->index = index; + (void)memcpy(sip_req->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME); + + uvs_list = ubcore_uvs_list_get_all_alive(&count); + for (i = 0; i < count; i++) { + ubcore_log_info( + "sending del_sip request to uvs %s, uvs_id %u\n", + uvs_list[i]->name, uvs_list[i]->id); + + resp_msg = ubcore_nl_send_wait(dev, req_msg, uvs_list[i]); + if (resp_msg == NULL) { + ubcore_log_err( + "Failed to wait query response from uvs %s, uvs_id %u\n", + uvs_list[i]->name, uvs_list[i]->id); + continue; + } + resp = (struct ubcore_del_sip_resp *)(void *)resp_msg->payload; + if (resp_msg->msg_type != UBCORE_CMD_DEL_SIP_RESP || + resp_msg->payload_len != + sizeof(struct ubcore_del_sip_resp) || + resp->ret != UBCORE_NL_RESP_SUCCESS) { + ubcore_log_err( + "del sip is rejected with type %d from uvs %s, uvs_id %u, ret %d\n", + resp_msg->msg_type, uvs_list[i]->name, + uvs_list[i]->id, + (resp == NULL ? 1 : resp->ret)); + } else + success_count++; + + kfree(resp_msg); + } + + ubcore_uvs_list_put(uvs_list, count); + kfree(req_msg); + + if (count != 0 && success_count == count) + return 0; + else + return -1; +} + +struct ubcore_nlmsg *ubcore_new_sip_req_msg(struct ubcore_device *dev, + struct ubcore_sip_info *sip_info, + uint32_t index) +{ + struct ubcore_add_sip_req *sip_req; + struct ubcore_nlmsg *req_msg; + + req_msg = ubcore_alloc_sip_req(UBCORE_CMD_ADD_SIP_REQ, + dev->transport_type, + sizeof(struct ubcore_add_sip_req), + sip_info); + if (req_msg == NULL) + return NULL; + + sip_req = (struct ubcore_add_sip_req *)(void *)req_msg->payload; + (void)memcpy(sip_req->dev_name, sip_info->dev_name, + UBCORE_MAX_DEV_NAME); + (void)memcpy(&sip_req->netaddr, &sip_info->addr, + sizeof(struct ubcore_net_addr)); + sip_req->index = index; + sip_req->port_cnt = sip_info->port_cnt; + (void)memcpy(sip_req->port_id, sip_info->port_id, UBCORE_MAX_PORT_CNT); + sip_req->mtu = sip_info->mtu; + + if (strnlen(sip_info->netdev_name, UBCORE_MAX_DEV_NAME) == + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("sip_info->netdev_name len is invalid"); + kfree(req_msg); + return NULL; + } + (void)memcpy(sip_req->netdev_name, sip_info->netdev_name, + UBCORE_MAX_DEV_NAME); + return req_msg; +} + +int ubcore_notify_uvs_add_sip(struct ubcore_device *dev, + struct ubcore_sip_info *sip_info, uint32_t index) +{ + struct ubcore_uvs_instance **uvs_list = NULL; + struct ubcore_nlmsg *req_msg, *resp_msg; + struct ubcore_add_sip_resp *resp; + int success_count = 0; + int count = 0; + int i; + + ubcore_log_info("notify all uvs to add sip: mue %s, sip_idx %u\n", + sip_info->dev_name, index); + + req_msg = ubcore_new_sip_req_msg(dev, sip_info, index); + if (req_msg == NULL) + return -ENOMEM; + + uvs_list = ubcore_uvs_list_get_all_alive(&count); + for (i = 0; i < count; i++) { + ubcore_log_info( + "sending add_sip request to uvs %s, uvs_id %u\n", + uvs_list[i]->name, uvs_list[i]->id); + + resp_msg = ubcore_nl_send_wait(dev, req_msg, uvs_list[i]); + if (resp_msg == NULL) { + ubcore_log_err( + "Failed to wait query response from uvs %s, uvs_id %u\n", + uvs_list[i]->name, uvs_list[i]->id); + continue; + } + + resp = (struct ubcore_add_sip_resp *)(void *)resp_msg->payload; + if (resp_msg->msg_type != UBCORE_CMD_ADD_SIP_RESP || + resp_msg->payload_len != + sizeof(struct ubcore_add_sip_resp) || + resp->ret != UBCORE_NL_RESP_SUCCESS) { + ubcore_log_err( + "add sip is rejected with type %d from uvs %s, uvs_id %u, ret %d\n", + resp_msg->msg_type, uvs_list[i]->name, + uvs_list[i]->id, + (resp 
== NULL ? 1 : resp->ret)); + } else + success_count++; + + kfree(resp_msg); + } + + ubcore_uvs_list_put(uvs_list, count); + kfree(req_msg); + + if (count != 0 && success_count == count) + return 0; + else + return -1; +} + +int ubcore_check_port_state(struct ubcore_device *dev) +{ + struct ubcore_device_status status = { 0 }; + uint32_t i; + + if (dev == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + if (ubcore_query_device_status(dev, &status) != 0) { + ubcore_log_err( + "query device status for state failed with dev name %s\n", + dev->dev_name); + return -EPERM; + } + + for (i = 0; i < UBCORE_MAX_PORT_CNT; i++) { + if (status.port_status[i].state == UBCORE_PORT_ACTIVE) { + ubcore_log_debug( + "Success to query dev %s - port %u state and it's active.\n", + dev->dev_name, i); + return 0; + } + } + ubcore_log_err("port state is not active with dev name: %s\n", + dev->dev_name); + return -EPERM; +} + +void ubcore_fill_port_netdev(struct ubcore_device *dev, struct net_device *ndev, + uint8_t *port_list, uint8_t *port_cnt) +{ + struct net_device *real_netdev = NULL; + struct ubcore_ndev_port *port_info; + + if (!ndev) + return; + if (is_vlan_dev(ndev)) + real_netdev = vlan_dev_real_dev(ndev); + else + real_netdev = ndev; + + down_write(&g_port_list_lock); + list_for_each_entry(port_info, &dev->port_list, node) { + if (port_info->ndev == real_netdev) { + (void)memcpy(port_list, port_info->port_list, + UBCORE_MAX_PORT_CNT); + *port_cnt = port_info->port_cnt; + up_write(&g_port_list_lock); + ubcore_log_info( + "Success to fill in port_list with port cnt: %u and dev_name %s", + *port_cnt, port_info->dev_name); + return; + } + } + up_write(&g_port_list_lock); + ubcore_log_warn("ndev:%s no available port found.\n", + netdev_name(ndev)); + /* Currently assigned port0 by default; So, here we don't need to change */ +} + +static int ubcore_add_new_port(struct ubcore_ndev_port *port_info, + uint8_t port_id, struct ubcore_device *dev, + struct net_device *ndev) +{ + uint8_t i; + + if (port_info->port_cnt >= UBCORE_MAX_PORT_CNT) { + ubcore_log_err( + "Failed to add port because it's over the max length"); + return -1; + } + for (i = 0; i < UBCORE_MAX_PORT_CNT; i++) { + if (!port_info->valid_list[i]) { + port_info->port_list[i] = (uint8_t)port_id; + port_info->valid_list[i] = true; + port_info->port_cnt++; + ubcore_log_info( + "ndev:%s dev_name: %s bound port%u: %u\n", + netdev_name(ndev), dev->dev_name, i, port_id); + break; + } + } + return 0; +} + +static int ubcore_port_duplicate_check(struct ubcore_ndev_port *port_info, + uint8_t port_id, + struct ubcore_device *dev, + struct net_device *ndev) +{ + uint8_t i; + + for (i = 0; i < UBCORE_MAX_PORT_CNT; i++) { + if (port_info->valid_list[i] && + port_info->port_list[i] == port_id) { + ubcore_log_err( + "ndev:%s dev_name: %s bound port%u: %u is already in the list\n", + netdev_name(ndev), dev->dev_name, i, port_id); + return -1; + } + } + return 0; +} + +int ubcore_set_port_netdev(struct ubcore_device *dev, struct net_device *ndev, + unsigned int port_id) +{ + struct ubcore_ndev_port *port_info, *new_node; + + if (dev == NULL || ndev == NULL || + strnlen(dev->dev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME || + strnlen(netdev_name(ndev), UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("invalid input parameter.\n"); + return -1; + } + down_write(&g_port_list_lock); + list_for_each_entry(port_info, &dev->port_list, node) { + if (port_info->ndev == ndev) { + if (ubcore_port_duplicate_check(port_info, + 
(uint8_t)port_id, dev, + ndev) != 0) { + up_write(&g_port_list_lock); + ubcore_log_err("Failed to do %s", __func__); + return -1; + } + if (ubcore_add_new_port(port_info, (uint8_t)port_id, + dev, ndev) != 0) { + up_write(&g_port_list_lock); + ubcore_log_err("Failed to ubcore_add_new_port"); + return -1; + } + up_write(&g_port_list_lock); + /* sync to sip table */ + ubcore_update_netdev_addr(dev, dev->netdev, + UBCORE_UPDATE_NET_ADDR, + false); + ubcore_update_all_vlan_netaddr(dev, + UBCORE_UPDATE_NET_ADDR); + return 0; + } + } + up_write(&g_port_list_lock); + + /* ndev port dones't exist, add new entry */ + new_node = kzalloc(sizeof(struct ubcore_ndev_port), GFP_ATOMIC); + if (new_node == NULL) + return -ENOMEM; + + new_node->ndev = ndev; + new_node->port_list[0] = (uint8_t)port_id; + new_node->valid_list[0] = true; + new_node->port_cnt = 1; + (void)memcpy(new_node->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME); + down_write(&g_port_list_lock); + list_add_tail(&new_node->node, &dev->port_list); + up_write(&g_port_list_lock); + ubcore_log_info("ndev:%s bound port[0]: %u\n", netdev_name(ndev), + new_node->port_list[0]); + ubcore_update_netdev_addr(dev, dev->netdev, UBCORE_UPDATE_NET_ADDR, + false); + ubcore_update_all_vlan_netaddr(dev, UBCORE_UPDATE_NET_ADDR); + return 0; +} +EXPORT_SYMBOL(ubcore_set_port_netdev); + +/* del corresponding port id, if the port list cnt is 0, it will del the entry */ +static int ubcore_del_port(struct ubcore_ndev_port *port_info, uint8_t port_id, + struct ubcore_device *dev, struct net_device *ndev) +{ + bool del = false; + uint8_t i; + + if (!ndev) { + ubcore_log_err("Invalid netdev.\n"); + return -EINVAL; + } + for (i = 0; i < UBCORE_MAX_PORT_CNT; i++) { + if (port_info->valid_list[i] && + port_info->port_list[i] == port_id) { + port_info->port_list[i] = 0; + port_info->valid_list[i] = false; + port_info->port_cnt--; + del = true; + ubcore_log_info( + "ndev:%s dev_name: %s bound port%u: %u has been deleted\n", + netdev_name(ndev), dev->dev_name, i, port_id); + break; + } + } + if (!del) { + ubcore_log_info( + "ndev:%s dev_name: %s bound port: %u cannot be found\n", + netdev_name(ndev), dev->dev_name, port_id); + return -1; + } + + if (port_info->port_cnt == 0) { + list_del(&port_info->node); + kfree(port_info); + ubcore_log_info( + "ndev:%s bound port_list has been remove entirely\n", + netdev_name(ndev)); + } + return 0; +} + +int ubcore_unset_port_netdev(struct ubcore_device *dev, struct net_device *ndev, + unsigned int port_id) +{ + struct ubcore_ndev_port *port_info; + + if (dev == NULL || ndev == NULL || + strnlen(dev->dev_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME || + strnlen(netdev_name(ndev), UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) { + ubcore_log_err("invalid input parameter.\n"); + return -1; + } + down_write(&g_port_list_lock); + list_for_each_entry(port_info, &dev->port_list, node) { + if (port_info->ndev == ndev) { + if (ubcore_del_port(port_info, (uint8_t)port_id, dev, + ndev) != 0) { + up_write(&g_port_list_lock); + ubcore_log_err("Failed to do ubcore_del_port"); + return -1; + } + up_write(&g_port_list_lock); + ubcore_update_netdev_addr(dev, dev->netdev, + UBCORE_UPDATE_NET_ADDR, + false); + return 0; + } + } + up_write(&g_port_list_lock); + + ubcore_log_err( + "Failed to find and remove ndev:%s dev_name: %s bound port: %u\n", + netdev_name(ndev), dev->dev_name, port_id); + return -1; +} +EXPORT_SYMBOL(ubcore_unset_port_netdev); + +void ubcore_free_netdev_port_list(struct ubcore_device *dev) +{ + struct ubcore_ndev_port 
*port_info, *next; + + if (dev == NULL) { + ubcore_log_warn("invalid input dev is null_ptr.\n"); + return; + } + + down_write(&g_port_list_lock); + list_for_each_entry_safe(port_info, next, &dev->port_list, node) { + if (port_info != NULL) { + if (port_info->port_cnt != 0) { + port_info->port_cnt = 0; + (void)memset(port_info->port_list, 0, + sizeof(uint8_t) * + UBCORE_MAX_PORT_CNT); + } + list_del(&port_info->node); + kfree(port_info); + } + } + up_write(&g_port_list_lock); +} + +void ubcore_put_port_netdev(struct ubcore_device *dev) +{ + ubcore_free_netdev_port_list(dev); + ubcore_update_netdev_addr(dev, dev->netdev, UBCORE_UPDATE_NET_ADDR, + false); +} +EXPORT_SYMBOL(ubcore_put_port_netdev); + +int ubcore_sip_table_init(struct ubcore_sip_table *sip_table, uint32_t size) +{ + uint32_t tmp = UBCORE_MAX_SIP; + + if (size != 0 && size < UBCORE_MAX_SIP) { + tmp = size; + ubcore_log_info("sip size init %u complete.\n", tmp); + } else { + ubcore_log_warn("sip size %u err, use default value %u.\n", + size, tmp); + } + bitmap_zero(sip_table->index_bitmap, UBCORE_MAX_SIP); + sip_table->entry = + kcalloc(tmp, sizeof(struct ubcore_sip_entry), GFP_KERNEL); + if (sip_table->entry == NULL) + return -1; + sip_table->max_sip_cnt = tmp; + mutex_init(&sip_table->lock); + return 0; +} + +void ubcore_sip_table_uninit(struct ubcore_sip_table *sip_table) +{ + mutex_lock(&sip_table->lock); + if (sip_table->entry != NULL) { + kfree(sip_table->entry); + sip_table->entry = NULL; + } + mutex_unlock(&sip_table->lock); + mutex_destroy(&sip_table->lock); +} + +int ubcore_sip_idx_alloc(struct ubcore_sip_table *sip_table) +{ + uint32_t ret_idx; + + mutex_lock(&sip_table->lock); + ret_idx = (uint32_t)find_first_zero_bit(sip_table->index_bitmap, + UBCORE_MAX_SIP); + if (ret_idx >= UBCORE_MAX_SIP) { + ubcore_log_err("idx allocation failed.\n"); + mutex_unlock(&sip_table->lock); + return -1; + } + set_bit(ret_idx, sip_table->index_bitmap); + mutex_unlock(&sip_table->lock); + return (int)ret_idx; +} + +int ubcore_sip_idx_free_without_lock(struct ubcore_sip_table *sip_table, + uint32_t idx) +{ + if (test_bit(idx, sip_table->index_bitmap) == false) { + ubcore_log_err("idx:%u is not used.\n", idx); + return -EINVAL; + } + clear_bit(idx, sip_table->index_bitmap); + return 0; +} + +int ubcore_sip_idx_free(struct ubcore_sip_table *sip_table, uint32_t idx) +{ + int ret = 0; + + mutex_lock(&sip_table->lock); + ret = ubcore_sip_idx_free_without_lock(sip_table, idx); + mutex_unlock(&sip_table->lock); + return ret; +} + +int ubcore_add_sip_entry(struct ubcore_sip_table *sip_table, + const struct ubcore_sip_info *sip, uint32_t idx) +{ + mutex_lock(&sip_table->lock); + if (idx >= sip_table->max_sip_cnt || + sip_table->entry[idx].sip_info.is_active) { + mutex_unlock(&sip_table->lock); + ubcore_log_err("Parameters are illegal.\n"); + return -EINVAL; + } + + (void)memcpy(&sip_table->entry[idx].sip_info, sip, + sizeof(struct ubcore_sip_info)); + sip_table->entry[idx].sip_info.is_active = true; + atomic_set(&sip_table->entry[idx].uvs_cnt, 0); + mutex_unlock(&sip_table->lock); + ubcore_log_info( + "mue_dev_name: %s sip table add entry idx: %d. 
addr: %pI6c\n", + sip->dev_name, idx, &sip->addr.net_addr); + return 0; +} + +int ubcore_del_net_addr(struct ubcore_device *dev, uint32_t idx) +{ + if (dev == NULL || dev->ops == NULL || + dev->ops->delete_net_addr == NULL) { + ubcore_log_err("Invalid ops"); + return -EINVAL; + } + return dev->ops->delete_net_addr(dev, idx); +} + +int ubcore_del_sip_entry_without_lock(struct ubcore_sip_table *sip_table, + uint32_t idx) +{ + if (idx >= sip_table->max_sip_cnt || + !sip_table->entry[idx].sip_info.is_active) { + ubcore_log_err("Parameters are illegal.\n"); + return -EINVAL; + } + + ubcore_log_info("mue_name: %s del sip entry idx: %u, addr: %pI6c.\n", + sip_table->entry[idx].sip_info.dev_name, idx, + &sip_table->entry[idx].sip_info.addr.net_addr); + sip_table->entry[idx].sip_info.is_active = false; + return 0; +} + +int ubcore_del_sip_entry(struct ubcore_sip_table *sip_table, uint32_t idx) +{ + int ret = 0; + + mutex_lock(&sip_table->lock); + ret = ubcore_del_sip_entry_without_lock(sip_table, idx); + mutex_unlock(&sip_table->lock); + return ret; +} + +static bool ubcore_sip_compare(struct ubcore_sip_info *sip_entry, + struct ubcore_sip_info *del_sip) +{ + if ((memcmp(sip_entry->dev_name, del_sip->dev_name, + sizeof(char) * UBCORE_MAX_DEV_NAME) == 0) && + (memcmp(&sip_entry->addr.net_addr, &del_sip->addr.net_addr, + sizeof(union ubcore_net_addr_union)) == 0) && + (memcmp(sip_entry->netdev_name, del_sip->netdev_name, + sizeof(char) * UBCORE_MAX_DEV_NAME) == 0)) + return true; + + return false; +} + +int ubcore_update_sip_entry(struct ubcore_sip_table *sip_table, + struct ubcore_sip_info *new_sip, uint32_t *sip_idx, + struct ubcore_sip_info *old_sip) +{ + uint32_t i; + int ret = -ENOENT; + + if (!sip_table || !new_sip || !sip_idx || !old_sip) + return -EINVAL; + + mutex_lock(&sip_table->lock); + for (i = 0; i < sip_table->max_sip_cnt; i++) { + if (!sip_table->entry[i].sip_info.is_active || + !ubcore_sip_compare(&sip_table->entry[i].sip_info, new_sip)) + continue; + + *sip_idx = i; + *old_sip = sip_table->entry[i].sip_info; + + sip_table->entry[i].sip_info = *new_sip; + sip_table->entry[i].sip_info.is_active = true; + ret = 0; + ubcore_log_info( + "mue_name: %s update sip entry idx: %u, addr: %pI6c.", + sip_table->entry[i].sip_info.dev_name, i, + &sip_table->entry[i].sip_info.addr.net_addr); + break; + } + mutex_unlock(&sip_table->lock); + return ret; +} + +int ubcore_lookup_sip_idx(struct ubcore_sip_table *sip_table, + struct ubcore_sip_info *sip, uint32_t *idx) +{ + uint32_t i; + + mutex_lock(&sip_table->lock); + for (i = 0; i < sip_table->max_sip_cnt; i++) { + if (sip_table->entry[i].sip_info.is_active && + ubcore_sip_compare(&sip_table->entry[i].sip_info, sip)) { + *idx = i; + break; + } + } + if (i == sip_table->max_sip_cnt) { + mutex_unlock(&sip_table->lock); + ubcore_log_warn("no available idx found.\n"); + return -EINVAL; + } + mutex_unlock(&sip_table->lock); + return 0; +} diff --git a/drivers/ub/urma/ubcore/ubcore_netdev.h b/drivers/ub/urma/ubcore/ubcore_netdev.h new file mode 100644 index 000000000000..f22f54ce13eb --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_netdev.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore netdev header file + * Author: Chen Wen + * Create: 2023-07-14 + * Note: + * History: 2023-07-14: Create file + */ + +#ifndef UBCORE_NETDEV_H +#define UBCORE_NETDEV_H + +#include + +int ubcore_check_port_state(struct ubcore_device *dev); +void ubcore_fill_port_netdev(struct ubcore_device *dev, struct net_device *ndev, + uint8_t *port_list, uint8_t *port_cnt); + +int ubcore_sip_table_init(struct ubcore_sip_table *sip_table, uint32_t size); +void ubcore_sip_table_uninit(struct ubcore_sip_table *sip_table); + +int ubcore_sip_idx_alloc(struct ubcore_sip_table *sip_table); +int ubcore_sip_idx_free_without_lock(struct ubcore_sip_table *sip_table, + uint32_t idx); +int ubcore_sip_idx_free(struct ubcore_sip_table *sip_table, uint32_t idx); + +int ubcore_add_sip_entry(struct ubcore_sip_table *sip_table, + const struct ubcore_sip_info *sip, uint32_t idx); + +int ubcore_del_net_addr(struct ubcore_device *dev, uint32_t idx); +int ubcore_del_sip_entry_without_lock(struct ubcore_sip_table *sip_table, + uint32_t idx); +int ubcore_del_sip_entry(struct ubcore_sip_table *sip_table, uint32_t idx); +int ubcore_lookup_sip_idx(struct ubcore_sip_table *sip_table, + struct ubcore_sip_info *sip, uint32_t *idx); +int ubcore_update_sip_entry(struct ubcore_sip_table *sip_table, + struct ubcore_sip_info *new_sip, uint32_t *sip_idx, + struct ubcore_sip_info *old_sip); +struct ubcore_device * +ubcore_lookup_mue_by_sip_addr(union ubcore_net_addr_union *addr, + enum ubcore_transport_type type); +int ubcore_notify_uvs_add_sip(struct ubcore_device *dev, + struct ubcore_sip_info *sip, + uint32_t index); +int ubcore_notify_uvs_del_sip(struct ubcore_device *dev, + struct ubcore_sip_info *sip, + uint32_t index); + +struct ubcore_sip_info * +ubcore_lookup_sip_info_without_lock(struct ubcore_sip_table *sip_table, + uint32_t idx); +struct ubcore_nlmsg *ubcore_new_sip_req_msg(struct ubcore_device *dev, + struct ubcore_sip_info *sip_info, + uint32_t index); + +void ubcore_free_netdev_port_list(struct ubcore_device *dev); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_netlink.c b/drivers/ub/urma/ubcore/ubcore_netlink.c new file mode 100644 index 000000000000..651f893c0cde --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_netlink.c @@ -0,0 +1,974 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details.
+ * + * Description: ubcore netlink module + * Author: Chen Wen, Yan Fangfang + * Create: 2022-08-27 + * Note: + * History: 2022-08-27: create file + */ + +#include +#include +#include +#include +#include "ubcore_log.h" +#include "ubcore_tp.h" +#include "ubcore_vtp.h" +#include "ubcore_priv.h" +#include "ubcore_netdev.h" +#include "ubcore_device.h" +#include "ubcore_genl_define.h" +#include "ubcore_uvs.h" +#include "ubcore_workqueue.h" +#include "ubcore_priv.h" +#include "ubcore_netlink.h" + +#define UBCORE_NL_INVALID_PORT 0 +#define CB_ARGS_DEV_LIST 0 +#define CB_ARGS_DEV_CNT 1 +#define CB_ARGS_DEV_IDX 2 +#define CB_ARGS_SIP_IDX 3 +#define CB_ARGS_INFO_TYPE 4 + +#define UBCORE_MAX_NL_MSG_BUF_LEN 2048 + +static LIST_HEAD(g_nl_session_list); +static DEFINE_SPINLOCK(g_nl_session_lock); +static atomic_t g_nlmsg_seq; +static atomic_t g_nl_buffer_size = ATOMIC_INIT(208 * 1024); +static int ubcore_genl_unicast(struct ubcore_nlmsg *req, uint32_t len, + struct ubcore_uvs_instance *uvs); +static int ubcore_insert_nl_msg_queue_pop_task(uint32_t wait_time); + +static LIST_HEAD(g_nl_msg_list); +static DEFINE_SPINLOCK(g_nl_msg_lock); + +static uint32_t ubcore_get_nlmsg_seq(void) +{ + return atomic_inc_return(&g_nlmsg_seq); +} + +static void ubcore_nl_msg_list_pop(void) +{ + struct ubcore_nl_message *message = NULL, *next; + unsigned long flags; + int ret = 0; + int msg_size; + int now_buffer; + struct ubcore_uvs_instance *uvs = NULL; + struct sock *genl_sock = NULL; + uint32_t genl_port; + + spin_lock_irqsave(&g_nl_msg_lock, flags); + list_for_each_entry_safe(message, next, &g_nl_msg_list, node) { + uvs = ubcore_uvs_find_get_by_genl_port(message->uvs_genl_port); + if (uvs == NULL) { + ubcore_log_err("Failed to find uvs: %u\n", + message->uvs_genl_port); + list_del(&message->node); + kfree(message); + continue; + } + + genl_sock = uvs->genl_sock; + genl_port = uvs->genl_port; + if (genl_sock == NULL || genl_port == UBCORE_NL_INVALID_PORT) { + ubcore_log_err( + "genl_port or genl_sock is invalid for uvs %s", + uvs->name); + ubcore_uvs_kref_put(uvs); + list_del(&message->node); + kfree(message); + continue; + } + + msg_size = NLMSG_DEFAULT_SIZE; + now_buffer = atomic_sub_return(msg_size, &g_nl_buffer_size); + atomic_add(msg_size, &uvs->nl_wait_buffer); + if (now_buffer < 0) { + atomic_add(msg_size, &g_nl_buffer_size); + atomic_sub(msg_size, &uvs->nl_wait_buffer); + ubcore_uvs_kref_put(uvs); + break; + } + + ret = nlmsg_unicast(genl_sock, message->nl_skb, genl_port); + if (ret != 0) { + ubcore_log_err("Failed to send nl msg %d\n", ret); + atomic_add(msg_size, &g_nl_buffer_size); + atomic_sub(msg_size, &uvs->nl_wait_buffer); + /* + * Failure to send a message can lead to a deadlock; + * a mechanism for continuing to send messages + * upon failure needs to be implemented subsequently + */ + ubcore_uvs_kref_put(uvs); + break; + } + ubcore_uvs_kref_put(uvs); + list_del(&message->node); + kfree(message); + } + spin_unlock_irqrestore(&g_nl_msg_lock, flags); +} + +struct ubcore_nlmsg *ubcore_alloc_nlmsg(size_t payload_len, + const union ubcore_eid *src_eid, + const union ubcore_eid *dst_eid) +{ + struct ubcore_nlmsg *msg; + + msg = kzalloc(sizeof(struct ubcore_nlmsg) + payload_len, GFP_KERNEL); + if (msg == NULL) + return NULL; + + if (src_eid != NULL) + msg->src_eid = *src_eid; + + if (dst_eid != NULL) + msg->dst_eid = *dst_eid; + + msg->payload_len = payload_len; + return msg; +} + +static void ubcore_free_nl_session(struct kref *kref) +{ + struct ubcore_nl_session *s = + container_of(kref, struct 
ubcore_nl_session, kref); + + if (s->dev) + ubcore_put_device(s->dev); + kfree(s->resp); + kfree(s); +} + +static inline void ubcore_get_nl_session(struct ubcore_nl_session *s) +{ + kref_get(&s->kref); +} + +static inline void ubcore_put_nl_session(struct ubcore_nl_session *s) +{ + (void)kref_put(&s->kref, ubcore_free_nl_session); +} + +static struct ubcore_nl_session * +ubcore_create_get_nl_session(struct ubcore_device *dev, + struct ubcore_nlmsg *req, + struct ubcore_nl_resp_cb *cb) +{ + struct ubcore_nl_session *s; + unsigned long flags; + + s = kzalloc(sizeof(struct ubcore_nl_session), GFP_KERNEL); + if (s == NULL) + return NULL; + + ubcore_get_device(dev); + s->dev = dev; + s->cb = *cb; + s->nlmsg_seq = req->nlmsg_seq; + kref_init(&s->kref); + init_completion(&s->comp); + + spin_lock_irqsave(&g_nl_session_lock, flags); + list_add_tail(&s->node, &g_nl_session_list); + ubcore_get_nl_session(s); + spin_unlock_irqrestore(&g_nl_session_lock, flags); + + return s; +} + +static inline void ubcore_rmv_nl_session(struct ubcore_nl_session *s) +{ + unsigned long flags; + bool rmv = false; + + spin_lock_irqsave(&g_nl_session_lock, flags); + if (!list_empty(&s->node)) { + list_del_init(&s->node); + rmv = true; + } + spin_unlock_irqrestore(&g_nl_session_lock, flags); + + if (rmv) + ubcore_put_nl_session(s); +} + +static struct ubcore_nl_session *ubcore_find_get_nl_session(uint32_t nlmsg_seq) +{ + struct ubcore_nl_session *tmp, *target = NULL; + unsigned long flags; + + spin_lock_irqsave(&g_nl_session_lock, flags); + list_for_each_entry(tmp, &g_nl_session_list, node) { + if (tmp->nlmsg_seq == nlmsg_seq) { + target = tmp; + ubcore_get_nl_session(target); + break; + } + } + spin_unlock_irqrestore(&g_nl_session_lock, flags); + return target; +} + +void ubcore_free_dev_nl_sessions(struct ubcore_device *dev) +{ + struct ubcore_nl_session *s = NULL; + struct list_head *p, *next; + unsigned long flags; + + spin_lock_irqsave(&g_nl_session_lock, flags); + list_for_each_safe(p, next, &g_nl_session_list) { + s = list_entry(p, struct ubcore_nl_session, node); + if (s->dev != dev) + continue; + + list_del_init(&s->node); + ubcore_put_nl_session(s); + } + spin_unlock_irqrestore(&g_nl_session_lock, flags); +} + +static struct ubcore_nlmsg *ubcore_get_genlmsg_data(struct genl_info *info) +{ + struct ubcore_nlmsg *msg; + uint32_t payload_len = 0; + + if (!info->attrs[UBCORE_MSG_SEQ]) + return NULL; + + payload_len = (uint32_t)nla_len(info->attrs[UBCORE_PAYLOAD_DATA]); + if (payload_len > UBCORE_MAX_NL_MSG_BUF_LEN) { + ubcore_log_err("Invalid payload len: %d", payload_len); + return NULL; + } + + msg = kzalloc((size_t)(sizeof(struct ubcore_nlmsg) + payload_len), + GFP_KERNEL); + if (msg == NULL) + return NULL; + + msg->payload_len = payload_len; + msg->nlmsg_seq = nla_get_u32(info->attrs[UBCORE_MSG_SEQ]); + if (info->attrs[UBCORE_MSG_TYPE]) + msg->msg_type = nla_get_u32(info->attrs[UBCORE_MSG_TYPE]); + + if (info->attrs[UBCORE_TRANSPORT_TYPE]) + msg->transport_type = (enum ubcore_transport_type)nla_get_u32( + info->attrs[UBCORE_TRANSPORT_TYPE]); + + if (info->attrs[UBORE_SRC_ID]) + (void)memcpy(&msg->src_eid, nla_data(info->attrs[UBORE_SRC_ID]), + UBCORE_EID_SIZE); + + if (info->attrs[UBORE_DST_ID]) + (void)memcpy(&msg->dst_eid, nla_data(info->attrs[UBORE_DST_ID]), + UBCORE_EID_SIZE); + + if (info->attrs[UBCORE_PAYLOAD_DATA]) { + (void)memcpy(msg->payload, + nla_data(info->attrs[UBCORE_PAYLOAD_DATA]), + payload_len); + } + + return msg; +} + +int ubcore_tp_resp_ops(struct sk_buff *skb, struct genl_info *info) +{ + 
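+ /* TP response from a UVS: copy the genl payload, match it to the waiting session by nlmsg_seq and wake up the sender */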
struct ubcore_nl_session *s; + struct ubcore_nlmsg *resp; + + resp = ubcore_get_genlmsg_data(info); + if (resp == NULL) { + ubcore_log_err("Failed to calloc and copy response"); + return 0; + } + s = ubcore_find_get_nl_session(resp->nlmsg_seq); + if (s == NULL) { + ubcore_log_err("Failed to find nl session with seq %u", + resp->nlmsg_seq); + kfree(resp); + return 0; + } + s->resp = resp; + complete(&s->comp); + ubcore_put_nl_session(s); + return 0; +} + +static int ubcore_genl_unicast(struct ubcore_nlmsg *req, uint32_t len, + struct ubcore_uvs_instance *uvs) +{ + int ret = 0; + struct sk_buff *nl_skb; + struct nlmsghdr *nlh; + struct ubcore_nl_message *nl_messages; + unsigned long flags; + + if (req == NULL || uvs->genl_sock == NULL || + uvs->genl_port == UBCORE_NL_INVALID_PORT) { + ubcore_log_err("There are illegal parameters.\n"); + return -1; + } + + /* create sk_buff */ + nl_skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (nl_skb == NULL) { + ubcore_log_err("failed to alloc.\n"); + return -1; + } + /* set genl head */ + nlh = genlmsg_put(nl_skb, uvs->genl_port, req->nlmsg_seq, + &ubcore_genl_family, NLM_F_ACK, + (uint8_t)req->msg_type); + if (nlh == NULL) { + ubcore_log_err("Failed to nlmsg put.\n"); + nlmsg_free(nl_skb); + return -1; + } + if (nla_put_u32(nl_skb, UBCORE_MSG_SEQ, req->nlmsg_seq) || + nla_put_u32(nl_skb, UBCORE_MSG_TYPE, (uint32_t)req->msg_type) || + nla_put_u32(nl_skb, UBCORE_TRANSPORT_TYPE, + (uint32_t)req->transport_type) || + nla_put(nl_skb, UBCORE_PAYLOAD_DATA, (int)req->payload_len, + req->payload)) { + nlmsg_free(nl_skb); + return -1; + } + + genlmsg_end(nl_skb, nlh); + ubcore_log_debug("send genl msg type %d seq:%u payload_len %u", + (int)req->msg_type, req->nlmsg_seq, req->payload_len); + + nl_messages = kzalloc(sizeof(struct ubcore_nl_message), GFP_ATOMIC); + if (nl_messages == NULL) + return -1; + nl_messages->nl_skb = nl_skb; + nl_messages->uvs_genl_port = uvs->genl_port; + + spin_lock_irqsave(&g_nl_msg_lock, flags); + list_add_tail(&nl_messages->node, &g_nl_msg_list); + spin_unlock_irqrestore(&g_nl_msg_lock, flags); + ret = ubcore_insert_nl_msg_queue_pop_task(0); + if (ret != 0) { + ubcore_log_err("Failed to insert_nl_msg_queue_pop_task\n"); + return -1; + } + + return ret; +} + +int ubcore_nl_msg_ack_ops(struct sk_buff *skb, struct genl_info *info) +{ + struct ubcore_uvs_instance *uvs = NULL; + int ack_size; + + ack_size = NLMSG_DEFAULT_SIZE; + uvs = ubcore_uvs_find_get_by_genl_port(info->snd_portid); + if (uvs == NULL) { + ubcore_log_err("Failed to find uvs, port = %d\n", + info->snd_portid); + return 0; + } + + atomic_sub(ack_size, &uvs->nl_wait_buffer); + atomic_add(ack_size, &g_nl_buffer_size); + ubcore_uvs_kref_put(uvs); + + if (ubcore_insert_nl_msg_queue_pop_task(0) != 0) + ubcore_log_err("Failed to insert_nl_msg_queue_pop_task\n"); + + return 0; +} + +int ubcore_tp_req_ops(struct sk_buff *skb, struct genl_info *info) +{ + struct ubcore_uvs_instance *uvs = NULL; + struct ubcore_nlmsg *resp = + NULL; // resp might not alloc memory, must init resp to null + struct ubcore_nlmsg *req; + + req = ubcore_get_genlmsg_data(info); + if (req == NULL) { + ubcore_log_err("Failed to calloc and copy req"); + return 0; + } + if (req->msg_type == UBCORE_CMD_RESTORE_TP_REQ) + resp = ubcore_handle_restore_tp_req(req); + + if (resp == NULL) { + ubcore_log_err("Failed to handle tp req"); + kfree(req); + return 0; + } + + uvs = ubcore_uvs_find_get_by_genl_port(info->snd_portid); + if (uvs == NULL) { + kfree(req); + kfree(resp); // since init resp to null, we can 
use kfree here + return 0; + } + + if (ubcore_genl_unicast(resp, ubcore_nlmsg_len(resp), uvs) != 0) + ubcore_log_err("Failed to send response"); + + ubcore_uvs_kref_put(uvs); + kfree(req); + kfree(resp); // since init resp to null, we can use kfree here + return 0; +} + +int ubcore_mue2ue_resp_ops(struct sk_buff *skb, struct genl_info *info) +{ + struct ubcore_nl_session *s; + struct ubcore_nlmsg *resp; + struct ubcore_device *dev; + + resp = ubcore_get_genlmsg_data(info); + if (resp == NULL) { + ubcore_log_err("Failed to calloc and copy response"); + return 0; + } + s = ubcore_find_get_nl_session(resp->nlmsg_seq); + if (s == NULL) { + ubcore_log_err("Failed to find nl session with seq %u", + resp->nlmsg_seq); + kfree(resp); + return 0; + } + s->resp = resp; + + dev = s->dev; + if (dev != NULL && s->cb.callback != NULL) { + s->cb.user_arg = (void *)dev; + s->cb.callback(resp, s->cb.user_arg); + } else { + ubcore_log_err("Invalid param"); + } + + ubcore_rmv_nl_session(s); + ubcore_put_nl_session(s); + return 0; +} + +int ubcore_tp2ue_vtp_status_notify_ops(struct sk_buff *skb, + struct genl_info *info) +{ + struct ubcore_vtp_status_notify *notify; + struct ubcore_resp_host *resp_host; + struct ubcore_nlmsg *nlmsg; + struct ubcore_device *dev; + + nlmsg = ubcore_get_genlmsg_data(info); + if (nlmsg == NULL) { + ubcore_log_err("Failed to calloc and copy response"); + return 0; + } + if (nlmsg->payload_len < + sizeof(struct ubcore_resp_host) + + sizeof(struct ubcore_vtp_status_notify)) { + ubcore_log_err("Netlink msg payload length not match, len:%u", + nlmsg->payload_len); + goto free_msg; + } + + resp_host = (struct ubcore_resp_host *)nlmsg->payload; + if (resp_host->resp.len < sizeof(struct ubcore_vtp_status_notify)) { + ubcore_log_err("wrong msg size:%u", resp_host->resp.len); + goto free_msg; + } + + notify = (struct ubcore_vtp_status_notify *)resp_host->resp.data; + if (strnlen(notify->mue_name, UBCORE_MAX_DEV_NAME) >= + UBCORE_MAX_DEV_NAME) + goto free_msg; + + dev = ubcore_find_mue_device_by_name(notify->mue_name); + if (!dev) { + ubcore_log_err("Failed to find dev %s", notify->mue_name); + goto free_msg; + } + + (void)ubcore_send_resp(dev, resp_host); + ubcore_put_device(dev); + +free_msg: + kfree(nlmsg); + return 0; +} + +int ubcore_update_mue_dev_info_resp_ops(struct sk_buff *skb, + struct genl_info *info) +{ + struct ubcore_update_mue_dev_info_resp *resp; + struct ubcore_nlmsg *nlmsg; + + nlmsg = ubcore_get_genlmsg_data(info); + if (nlmsg == NULL) { + ubcore_log_err("Failed to calloc and copy response"); + return 0; + } + if (nlmsg->payload_len < + sizeof(struct ubcore_update_mue_dev_info_resp)) { + ubcore_log_err("Netlink msg payload length not match, len:%u", + nlmsg->payload_len); + goto free_msg; + } + + resp = (struct ubcore_update_mue_dev_info_resp *)(void *)nlmsg->payload; + if (resp->ret != UBCORE_NL_RESP_SUCCESS) + ubcore_log_err("Failed to get mue dev info resp"); + +free_msg: + kfree(nlmsg); + return 0; +} + +static struct ubcore_nlmsg * +ubcore_get_migrate_vtp_req(struct ubcore_vtp *vtp, + enum ubcore_event_type event_type, + struct ubcore_device *dev) +{ + uint32_t payload_len = (uint32_t)sizeof(struct ubcore_migrate_vtp_req); + struct ubcore_migrate_vtp_req *mig_req; + struct ubcore_nlmsg *req; + + req = ubcore_alloc_nlmsg(payload_len, &vtp->cfg.local_eid, + &vtp->cfg.peer_eid); + if (req == NULL) + return NULL; + + req->transport_type = UBCORE_TRANSPORT_UB; + if (event_type == UBCORE_EVENT_MIGRATE_VTP_SWITCH) { + req->msg_type = UBCORE_CMD_MIGRATE_VTP_SWITCH; + } else 
if (event_type == UBCORE_EVENT_MIGRATE_VTP_ROLLBACK) { + req->msg_type = UBCORE_CMD_MIGRATE_VTP_ROLLBACK; + } else { + kfree(req); + ubcore_log_err("wrong event msg type"); + return NULL; + } + mig_req = (struct ubcore_migrate_vtp_req *)(void *)req->payload; + (void)memcpy(mig_req->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME); + + mig_req->vtp_cfg.ue_idx = vtp->cfg.ue_idx; + mig_req->vtp_cfg.vtpn = vtp->cfg.vtpn; + mig_req->vtp_cfg.local_jetty = vtp->cfg.local_jetty; + mig_req->vtp_cfg.local_eid = vtp->cfg.local_eid; + mig_req->vtp_cfg.peer_eid = vtp->cfg.peer_eid; + mig_req->vtp_cfg.peer_jetty = vtp->cfg.peer_jetty; + mig_req->vtp_cfg.flag = vtp->cfg.flag; + mig_req->vtp_cfg.trans_mode = vtp->cfg.trans_mode; + + return req; +} + +void ubcore_report_migrate_vtp(struct ubcore_device *dev, + struct ubcore_vtp *vtp, + enum ubcore_event_type event_type) +{ + struct ubcore_uvs_instance *uvs = NULL; + struct ubcore_nlmsg *req_msg; + int ret; + + req_msg = ubcore_get_migrate_vtp_req(vtp, event_type, dev); + if (req_msg == NULL) { + ubcore_log_err("Failed to get migrate vtp switch req"); + return; + } + + uvs = ubcore_find_get_uvs_by_ue(dev, vtp->cfg.ue_idx); + if (uvs == NULL) { + ubcore_log_err("Failed to find uvs for ue %u", vtp->cfg.ue_idx); + kfree(req_msg); + return; + } + + ret = ubcore_nl_send_nowait_without_cb(req_msg, uvs); + if (ret) + ubcore_log_err("Failed to nowait send migrate vtp request"); + else + ubcore_log_info("Success to nowait send migrate vtp request"); + + ubcore_uvs_kref_put(uvs); + kfree(req_msg); +} + +struct ubcore_nlmsg *ubcore_nl_send_wait(struct ubcore_device *dev, + struct ubcore_nlmsg *req, + struct ubcore_uvs_instance *uvs) +{ + struct ubcore_nl_resp_cb cb = {}; + struct ubcore_nlmsg *resp = NULL; + struct ubcore_nl_session *s; + unsigned long leavetime; + int ret; + + if (uvs->genl_sock == NULL || + uvs->genl_port == UBCORE_NL_INVALID_PORT) { + ubcore_log_err("genl_port or genl_sock is invalid for uvs %s", + uvs->name); + return NULL; + } + + req->nlmsg_seq = ubcore_get_nlmsg_seq(); + s = ubcore_create_get_nl_session(dev, req, &cb); + if (s == NULL) { + ubcore_log_err("Failed to create nl session"); + return NULL; + } + + ret = ubcore_genl_unicast(req, ubcore_nlmsg_len(req), uvs); + if (ret != 0) { + ubcore_log_err("Failed to send nl msg %d", ret); + goto exit; + } + + leavetime = wait_for_completion_timeout( + &s->comp, msecs_to_jiffies(UBCORE_TYPICAL_TIMEOUT)); + if (leavetime == 0) { + ubcore_log_err( + "Failed to wait reply, ret: %d, leavetime: %lu\n", ret, + leavetime); + goto exit; + } + + resp = s->resp; + s->resp = NULL; /* resp memory is managed by caller */ + +exit: + ubcore_rmv_nl_session(s); + ubcore_put_nl_session(s); + return resp; +} + +int ubcore_nl_send_nowait(struct ubcore_device *dev, struct ubcore_nlmsg *req, + struct ubcore_nl_resp_cb *cb, + struct ubcore_uvs_instance *uvs) +{ + struct ubcore_nl_session *s; + int ret; + + req->nlmsg_seq = ubcore_get_nlmsg_seq(); + s = ubcore_create_get_nl_session(dev, req, cb); + if (s == NULL) { + ubcore_log_err("Failed to create nl session"); + return -ENOMEM; + } + + ret = ubcore_genl_unicast(req, ubcore_nlmsg_len(req), uvs); + if (ret != 0) { + ubcore_log_err("Failed to send nl msg %d", ret); + ubcore_rmv_nl_session(s); + ubcore_put_nl_session(s); + return -EIO; + } + + ubcore_put_nl_session(s); + return 0; +} + +int ubcore_nl_send_nowait_without_cb(struct ubcore_nlmsg *req, + struct ubcore_uvs_instance *uvs) +{ + int ret; + + req->nlmsg_seq = ubcore_get_nlmsg_seq(); + + if (uvs->genl_sock == NULL || + 
uvs->genl_port == UBCORE_NL_INVALID_PORT) { + ubcore_log_err("genl_port or genl_sock is invalid for uvs %s", + uvs->name); + return -EINVAL; + } + + ret = ubcore_genl_unicast(req, ubcore_nlmsg_len(req), uvs); + if (ret != 0) { + ubcore_log_err("Failed to send nl msg to uvs %s, return %d", + uvs->name, ret); + return -EIO; + } + + return 0; +} + +bool ubcore_get_netlink_valid(void) +{ + bool any_uvs_is_connected; + + any_uvs_is_connected = ubcore_uvs_list_get_alive_count() > 0; + if (!any_uvs_is_connected) + ubcore_log_warn("The nelink service is not established well"); + + return any_uvs_is_connected; +} + +static int ubcore_set_genl_nla(struct sk_buff *skb, + struct ubcore_nlmsg *req_msg) +{ + if (nla_put_u32(skb, UBCORE_MSG_SEQ, req_msg->nlmsg_seq) || + nla_put_u32(skb, UBCORE_MSG_TYPE, (uint32_t)req_msg->msg_type) || + nla_put_u32(skb, UBCORE_TRANSPORT_TYPE, + (uint32_t)req_msg->transport_type) || + nla_put(skb, UBCORE_PAYLOAD_DATA, (int)req_msg->payload_len, + req_msg->payload)) + return -1; + + return 0; +} + +static int ubcore_dump_genl_info(struct sk_buff *skb, + struct netlink_callback *cb, + struct ubcore_nlmsg *req_msg) +{ + void *hdr; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &ubcore_genl_family, NLM_F_MULTI, + (uint8_t)req_msg->msg_type); + if (!hdr) + return -ENOMEM; + + if (ubcore_set_genl_nla(skb, req_msg)) { + genlmsg_cancel(skb, hdr); + return -EMSGSIZE; + } + genlmsg_end(skb, hdr); + return 0; +} + +static int ubcore_dev_sip_info(struct sk_buff *skb, struct netlink_callback *cb, + struct ubcore_device *dev) +{ + long i = cb->args[CB_ARGS_SIP_IDX]; + struct ubcore_sip_table *sip_table; + struct ubcore_nlmsg *req_msg; + struct ubcore_sip_info *sip; + uint32_t max_cnt; + int ret; + + sip_table = &dev->sip_table; + mutex_lock(&sip_table->lock); + max_cnt = sip_table->max_sip_cnt; + for (; i < max_cnt; i++) { + sip = &sip_table->entry[i].sip_info; + if (!sip->is_active) + continue; + req_msg = ubcore_new_sip_req_msg(dev, sip, + (uint32_t)(unsigned long)i); + if (req_msg == NULL) { + ubcore_log_warn("failed to get sip info %s", + dev->dev_name); + continue; + } + ret = ubcore_dump_genl_info(skb, cb, req_msg); + kfree(req_msg); + if (ret != 0) { + mutex_unlock(&sip_table->lock); + return -1; + } + cb->args[CB_ARGS_SIP_IDX] = i; + } + mutex_unlock(&sip_table->lock); + + if (i == max_cnt) + cb->args[CB_ARGS_SIP_IDX] = 0; + return 0; +} + +static int ubcore_get_sip_info_dump(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct ubcore_device **dev_list = + (struct ubcore_device **)cb->args[CB_ARGS_DEV_LIST]; + long dev_cnt = cb->args[CB_ARGS_DEV_CNT]; + long i = cb->args[CB_ARGS_DEV_IDX]; + + for (; i < dev_cnt; ++i) { + if (ubcore_dev_sip_info(skb, cb, dev_list[i])) + break; + } + cb->args[CB_ARGS_DEV_IDX] = i; + return (int)skb->len; +} + +static int ubcore_get_mue_dev_dump(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct ubcore_device **dev_list = + (struct ubcore_device **)cb->args[CB_ARGS_DEV_LIST]; + long dev_cnt = cb->args[CB_ARGS_DEV_CNT]; + long i = cb->args[CB_ARGS_DEV_IDX]; + struct ubcore_nlmsg *req_msg; + int ret; + + for (; i < dev_cnt; ++i) { + req_msg = ubcore_new_mue_dev_msg(dev_list[i]); + if (req_msg == NULL) { + ubcore_log_warn("failed to get mue dev info %s", + dev_list[i]->dev_name); + continue; + } + ret = ubcore_dump_genl_info(skb, cb, req_msg); + kfree(req_msg); + if (ret != 0) + break; + ubcore_log_info("dump dev :%s success.\n", + dev_list[i]->dev_name); + } + if (i == dev_cnt) { + 
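+		/*
+		 * Every MUE device has been dumped in this pass: flip the
+		 * multi-part dump over to SIP info (CB_ARGS_INFO_TYPE = 1)
+		 * and reset the device index so the next dump callback
+		 * continues in ubcore_get_sip_info_dump().
+		 */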
cb->args[CB_ARGS_INFO_TYPE] = 1; + cb->args[CB_ARGS_DEV_IDX] = 0; + } else { + cb->args[CB_ARGS_DEV_IDX] = i; + } + return (int)skb->len; +} + +int ubcore_get_uvs_init_res_start(struct netlink_callback *cb) +{ + struct ubcore_device **dev_list = NULL; + uint32_t dev_cnt; + + dev_list = ubcore_get_all_mue_device(UBCORE_TRANSPORT_UB, &dev_cnt); + ubcore_log_info("init_res :%u start.\n", dev_cnt); + + cb->args[CB_ARGS_DEV_LIST] = (long)dev_list; + cb->args[CB_ARGS_DEV_CNT] = dev_cnt; + cb->args[CB_ARGS_DEV_IDX] = 0; + cb->args[CB_ARGS_SIP_IDX] = 0; + cb->args[CB_ARGS_INFO_TYPE] = 0; + cb->args[CB_ARGS_SIP_IDX] = 0; + return 0; +} + +int ubcore_get_uvs_init_res_dump(struct sk_buff *skb, + struct netlink_callback *cb) +{ + long info_type = cb->args[CB_ARGS_INFO_TYPE]; + long dev_cnt = cb->args[CB_ARGS_DEV_CNT]; + int ret; + + if (dev_cnt <= 0) + return 0; + if (info_type == 0) + ret = ubcore_get_mue_dev_dump(skb, cb); + else + ret = ubcore_get_sip_info_dump(skb, cb); + ubcore_log_info("init_res ret:%d.\n", ret); + return ret; +} + +int ubcore_get_uvs_init_res_done(struct netlink_callback *cb) +{ + struct ubcore_device **dev_list = + (struct ubcore_device **)cb->args[CB_ARGS_DEV_LIST]; + long dev_cnt = cb->args[CB_ARGS_DEV_CNT]; + long i; + + for (i = 0; i < dev_cnt; ++i) + if (dev_list[i] != NULL) + ubcore_put_device(dev_list[i]); + + kfree(dev_list); + ubcore_log_info("init_res done.\n"); + return 0; +} + +int ubcore_set_genl_pid_ops(struct sk_buff *skb, struct genl_info *info) +{ + char uvs_name[UBCORE_MAX_UVS_NAME_LEN]; + uint32_t payload_len; + int ret; + + payload_len = (uint32_t)nla_len(info->attrs[UBCORE_PAYLOAD_DATA]); + if (payload_len == 0 || payload_len > UBCORE_MAX_UVS_NAME_LEN) { + ubcore_log_err("invalid payload len: %d", payload_len); + return 0; + } + + if (info->attrs[UBCORE_PAYLOAD_DATA] == NULL) { + ubcore_log_err("invalid payload data\n"); + return 0; + } + + (void)memcpy(uvs_name, nla_data(info->attrs[UBCORE_PAYLOAD_DATA]), + UBCORE_MAX_UVS_NAME_LEN - 1); + uvs_name[UBCORE_MAX_UVS_NAME_LEN - 1] = '\0'; + + ret = ubcore_uvs_set_genl_info(uvs_name, info->snd_portid, + genl_info_net(info)->genl_sock); + if (ret != 0) + ubcore_log_err("uvs instance %s doesn't exist\n", uvs_name); + + return 0; +} + +void ubcore_unset_genl_pid_ops(uint32_t genl_port) +{ + struct ubcore_uvs_instance *uvs; + + uvs = ubcore_uvs_find_get_by_genl_port(genl_port); + if (uvs != NULL) { + ubcore_log_info( + "successfully unset port %u for uvs %s, uvs_id %u.\n", + genl_port, uvs->name, uvs->id); + uvs->genl_port = UBCORE_NL_INVALID_PORT; + uvs->genl_sock = NULL; + ubcore_uvs_kref_put(uvs); + } +} + +static void ubcore_nl_msg_queue_pop_task(struct work_struct *work) +{ + struct delayed_work *delay_work = + container_of(work, struct delayed_work, work); + + kfree(delay_work); + ubcore_nl_msg_list_pop(); +} + +static int ubcore_insert_nl_msg_queue_pop_task(uint32_t wait_time) +{ + struct delayed_work *delay_work = + kzalloc(sizeof(struct delayed_work), GFP_ATOMIC); + + if (delay_work == NULL) + return -ENOMEM; + + INIT_DELAYED_WORK(delay_work, ubcore_nl_msg_queue_pop_task); + if (ubcore_queue_delayed_work((int)UBCORE_NLMSG_WQ, delay_work, + wait_time) != 0) { + ubcore_log_err("Fail to insert nl msg queue pop task.\n"); + kfree(delay_work); + return -1; + } + return 0; +} + +void ubcore_uvs_release_nl_buffer(struct ubcore_uvs_instance *uvs) +{ + int wait_buffer; + + ubcore_uvs_kref_get(uvs); + wait_buffer = atomic_read(&uvs->nl_wait_buffer); + atomic_add(wait_buffer, &g_nl_buffer_size); + ubcore_log_warn( + 
"successfully release wait buffer for uvs: %s, size: %d/%d\n", + uvs->name, wait_buffer, atomic_read(&g_nl_buffer_size)); + ubcore_uvs_kref_put(uvs); +} diff --git a/drivers/ub/urma/ubcore/ubcore_netlink.h b/drivers/ub/urma/ubcore/ubcore_netlink.h new file mode 100644 index 000000000000..ce23505cb89c --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_netlink.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore netlink head file + * Author: Chen Wen + * Create: 2022-08-27 + * Note: + * History: 2022-08-27: Create file + */ + +#ifndef UBCORE_NETLINK_H +#define UBCORE_NETLINK_H + +#include +#include +#include +#include "ubcore_cmd.h" + +struct ubcore_uvs_instance; + +enum ubcore_nl_resp_status { + UBCORE_NL_RESP_IN_PROGRESS = -2, + UBCORE_NL_RESP_FAIL = -1, + UBCORE_NL_RESP_SUCCESS = 0 +}; + +struct ubcore_nlmsg { + uint32_t nlmsg_seq; + enum ubcore_cmd msg_type; + union ubcore_eid src_eid; /* todo: delete */ + union ubcore_eid dst_eid; /* todo: delete */ + enum ubcore_transport_type transport_type; + uint32_t payload_len; + uint8_t payload[0]; // limited by tpsa_nl_msg_t's payload len +}; + +struct ubcore_ta_data { + enum ubcore_transport_type trans_type; + enum ubcore_ta_type ta_type; + struct ubcore_jetty_id jetty_id; /* local jetty id */ + struct ubcore_jetty_id tjetty_id; /* peer jetty id */ + bool is_target; +}; + +struct ubcore_msg_ack { + uint32_t payload_len; +}; + +struct ubcore_nl_query_tp_req { + enum ubcore_transport_mode trans_mode; + char dev_name[UBCORE_MAX_DEV_NAME]; + uint16_t ue_idx; +}; + +struct ubcore_nl_query_tp_resp { + enum ubcore_nl_resp_status ret; + uint8_t retry_num; + uint8_t retry_factor; + uint8_t ack_timeout; + uint8_t dscp; + uint32_t oor_cnt; +}; + +struct ubcore_nl_restore_tp_req { + enum ubcore_transport_mode trans_mode; + uint32_t tpn; + uint32_t peer_tpn; + uint32_t rx_psn; + struct ubcore_ta_data ta; +}; + +struct ubcore_nl_restore_tp_resp { + enum ubcore_nl_resp_status ret; + uint32_t peer_rx_psn; +}; + +struct ubcore_nl_resp_cb { + void *user_arg; + int (*callback)(struct ubcore_nlmsg *resp, void *user_arg); +}; + +struct ubcore_nl_session { + uint32_t nlmsg_seq; + struct ubcore_nlmsg *resp; /* memory is managed by session */ + struct list_head node; + struct kref kref; + struct ubcore_nl_resp_cb cb; + struct completion + comp; /* Synchronization event of timeout sleep and thread wakeup */ + struct ubcore_device *dev; +}; + +struct ubcore_nl_message { + struct list_head node; + struct sk_buff *nl_skb; + uint32_t uvs_genl_port; +}; + +struct ubcore_add_sip_req { + struct ubcore_net_addr netaddr; + char dev_name[UBCORE_MAX_DEV_NAME]; + uint8_t port_cnt; + uint8_t port_id[UBCORE_MAX_PORT_CNT]; + uint32_t index; + uint32_t mtu; + char netdev_name[UBCORE_MAX_DEV_NAME]; /* for change mtu */ +}; + +struct ubcore_add_sip_resp { + enum ubcore_nl_resp_status ret; +}; + +struct ubcore_del_sip_req { + char dev_name[UBCORE_MAX_DEV_NAME]; + uint32_t index; +}; + +struct ubcore_del_sip_resp { + enum 
ubcore_nl_resp_status ret; +}; + +struct ubcore_tp_suspend_req { + uint32_t tpgn; + uint32_t tpn; + uint16_t data_udp_start; + uint16_t ack_udp_start; + uint32_t sip_idx; + char mue_dev_name[UBCORE_MAX_DEV_NAME]; +}; + +struct ubcore_tp_flush_done_req { + uint32_t tpgn; + uint32_t tpn; + uint16_t data_udp_start; + uint16_t ack_udp_start; + uint32_t tx_psn; + uint32_t peer_tpn; + enum ubcore_transport_mode trans_mode; + uint32_t sip_idx; + struct ubcore_net_addr sip; + union ubcore_eid local_eid; + uint32_t local_jetty_id; + union ubcore_eid peer_eid; + uint32_t peer_jetty_id; + char mue_dev_name[UBCORE_MAX_DEV_NAME]; +}; + +struct ubcore_nl_function_mig_req { + uint16_t mig_ue_idx; + char dev_name[UBCORE_MAX_DEV_NAME]; +}; + +enum ubcore_update_mue_opcode { + UBCORE_UPDATE_MUE_ADD = 0, + UBCORE_UPDATE_MUE_DEL +}; + +struct ubcore_update_mue_dev_info_req { + char dev_name[UBCORE_MAX_DEV_NAME]; + char netdev_name[UBCORE_MAX_DEV_NAME]; + union ubcore_device_feat dev_fea; + uint32_t cc_entry_cnt; + enum ubcore_update_mue_opcode opcode; + uint8_t data[]; +}; // same as tpsa_nl_update_mue_dev_info_req + +struct ubcore_update_mue_dev_info_resp { + enum ubcore_nl_resp_status ret; +}; // same as tpsa_nl_update_mue_dev_info_resp + +static inline uint32_t ubcore_nlmsg_len(struct ubcore_nlmsg *msg) +{ + return sizeof(struct ubcore_nlmsg) + msg->payload_len; +} + +struct ubcore_nlmsg_delay_work { + struct delayed_work delay_work; + struct ubcore_uvs_instance *uvs; + struct ubcore_nlmsg *req; + uint32_t len; +}; + +bool ubcore_get_netlink_valid(void); +/* return response msg pointer, caller must release it */ +struct ubcore_nlmsg *ubcore_nl_send_wait(struct ubcore_device *dev, + struct ubcore_nlmsg *req, + struct ubcore_uvs_instance *uvs); + +int ubcore_nl_send_nowait(struct ubcore_device *dev, struct ubcore_nlmsg *req, + struct ubcore_nl_resp_cb *cb, + struct ubcore_uvs_instance *uvs); +int ubcore_nl_send_nowait_without_cb(struct ubcore_nlmsg *req, + struct ubcore_uvs_instance *uvs); + +struct ubcore_nlmsg *ubcore_alloc_nlmsg(size_t payload_len, + const union ubcore_eid *src_eid, + const union ubcore_eid *dst_eid); + +void ubcore_report_migrate_vtp(struct ubcore_device *dev, + struct ubcore_vtp *vtp, + enum ubcore_event_type event_type); + +int ubcore_get_uvs_init_res_done(struct netlink_callback *cb); +int ubcore_get_uvs_init_res_dump(struct sk_buff *skb, + struct netlink_callback *cb); +int ubcore_get_uvs_init_res_start(struct netlink_callback *cb); +extern struct genl_family ubcore_genl_family; +int ubcore_set_genl_pid_ops(struct sk_buff *skb, struct genl_info *info); +void ubcore_unset_genl_pid_ops(uint32_t genl_port); +int ubcore_mue2ue_resp_ops(struct sk_buff *skb, struct genl_info *info); +int ubcore_tp_resp_ops(struct sk_buff *skb, struct genl_info *info); +int ubcore_tp_req_ops(struct sk_buff *skb, struct genl_info *info); +int ubcore_update_mue_dev_info_resp_ops(struct sk_buff *skb, + struct genl_info *info); +int ubcore_tp2ue_vtp_status_notify_ops(struct sk_buff *skb, + struct genl_info *info); +int ubcore_nl_msg_ack_ops(struct sk_buff *skb, struct genl_info *info); +void ubcore_free_dev_nl_sessions(struct ubcore_device *dev); +void ubcore_uvs_release_nl_buffer(struct ubcore_uvs_instance *uvs); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_priv.h b/drivers/ub/urma/ubcore/ubcore_priv.h new file mode 100644 index 000000000000..42bef8e2bc1c --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_priv.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei 
Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore's private data structure and function declarations + * Author: Qian Guoxin + * Create: 2022-7-22 + * Note: + * History: 2022-7-22: Create file + */ + +#ifndef UBCORE_PRIV_H +#define UBCORE_PRIV_H + +#include +#include +#include +#include "ubcore_tp.h" + +#define UBCORE_MAX_UVS_NAME_LEN 64 +#define UBCORE_MAX_UVS_CNT 64 +#define UBCORE_MAX_MUE_NUM 16 + +/* + * Pure UB device, netdev type is Unified Bus (UB). + * On the Internet Assigned Numbers Authority, add Hardware Types: Unified Bus (UB) + */ +#define UBCORE_NETDEV_UB_TYPE (38) /* Unified Bus(UB) */ +#define UCBORE_INVALID_UPI 0xffffffff +#define UBCORE_TYPICAL_TIMEOUT 30000 /* 30s */ +#define UBCORE_DESTROY_TIMEOUT 2000 /* 2s */ +#define UCBORE_DEFAULT_UPI 0 + +enum ubcore_uvs_state { + UBCORE_UVS_STATE_DEAD = 0, + UBCORE_UVS_STATE_ALIVE, +}; + +enum ubcore_restore_policy { + UBCORE_RESTORE_POLICY_KEEP = 0, + UBCORE_RESTORE_POLICY_CLEANUP, + UBCORE_RESTORE_POLICY_MAX, +}; + +struct sip_idx_node { + struct list_head node; + uint32_t sip_idx; + struct ubcore_sip_info *sip_info; +}; + +struct ubcore_uvs_instance { + struct list_head list_node; + struct kref ref; + + char name[UBCORE_MAX_UVS_NAME_LEN]; /* name to identify UVS */ + enum ubcore_uvs_state state; + uint32_t id; + uint32_t policy; + + uint32_t genl_port; /* uvs genl port */ + struct sock *genl_sock; + uint32_t pid; + atomic_t map2ue; + atomic_t nl_wait_buffer; + spinlock_t sip_list_lock; + struct list_head sip_list; +}; + +struct ubcore_ue_entry { + struct ubcore_uvs_instance *uvs_inst; + DECLARE_BITMAP(eid_bitmap, UBCORE_MAX_EID_CNT); +}; + +struct ubcore_ue_table { + char mue_name[UBCORE_MAX_DEV_NAME]; + struct ubcore_ue_entry ue_entries[UBCORE_MAX_UE_CNT]; + spinlock_t ue2uvs_lock; +}; + +struct ubcore_mue_file { + struct kref ref; + atomic_t driver_get; + char mue_name[UBCORE_MAX_DEV_NAME]; +}; + +struct ubcore_global_file { + struct kref ref; + struct ubcore_uvs_instance *uvs; +}; + +static inline struct ubcore_ucontext * +ubcore_get_uctx(struct ubcore_udata *udata) +{ + return udata == NULL ? NULL : udata->uctx; +} + +static inline bool +ubcore_check_trans_mode_valid(enum ubcore_transport_mode trans_mode) +{ + return trans_mode == UBCORE_TP_RM || trans_mode == UBCORE_TP_RC || + trans_mode == UBCORE_TP_UM; +} + +/* combine sub_trans_mode and share_tp -> uint16_t? 
*/ +static inline bool is_create_rc_shared_tp(enum ubcore_transport_mode trans_mode, + uint32_t order_type, + uint32_t share_tp) +{ + if (trans_mode == UBCORE_TP_RC && order_type == UBCORE_OT && + share_tp == 1) + return true; + + return false; +} + +/* Caller must put device */ +struct ubcore_device *ubcore_find_device(union ubcore_eid *eid, + enum ubcore_transport_type type); +struct ubcore_device *ubcore_find_device_with_name(const char *dev_name); +bool ubcore_check_dev_is_exist(const char *dev_name); +void ubcore_get_device(struct ubcore_device *dev); +void ubcore_put_device(struct ubcore_device *dev); +struct ubcore_device * +ubcore_find_mue_device(union ubcore_net_addr_union *netaddr, + enum ubcore_transport_type type); +struct ubcore_device *ubcore_find_mue_by_dev(struct ubcore_device *dev); +struct ubcore_device *ubcore_find_mue_device_by_name(char *dev_name); +/* returned list should be freed by caller */ +struct ubcore_device ** +ubcore_get_all_mue_device(enum ubcore_transport_type type, uint32_t *dev_cnt); + +int ubcore_update_eidtbl_by_idx(struct ubcore_device *dev, + union ubcore_eid *eid, uint32_t eid_idx, + bool is_alloc_eid, struct net *net); +int ubcore_update_eidtbl_by_eid(struct ubcore_device *dev, + union ubcore_eid *eid, uint32_t *eid_idx, + bool is_alloc_eid, struct net *net); + +int ubcore_find_upi_with_dev_name(const char *dev_name, uint32_t *upi); +int ubcore_add_upi_list(struct ubcore_device *dev, uint32_t upi); + +/* Must call ubcore_put_devices to put and release the returned devices */ +struct ubcore_device **ubcore_get_devices_from_netdev(struct net_device *netdev, + uint32_t *cnt); +void ubcore_put_devices(struct ubcore_device **devices, uint32_t cnt); +void ubcore_update_netdev_addr(struct ubcore_device *dev, + struct net_device *netdev, + enum ubcore_net_addr_op op, bool async); +void ubcore_update_netaddr(struct ubcore_device *dev, struct net_device *netdev, + bool add); +int ubcore_fill_netaddr_macvlan(struct ubcore_net_addr *netaddr, + struct net_device *netdev, + enum ubcore_net_addr_type type); + +void ubcore_set_tp_init_cfg(struct ubcore_tp *tp, struct ubcore_tp_cfg *cfg); +struct ubcore_tp *ubcore_create_tp(struct ubcore_device *dev, + struct ubcore_tp_cfg *cfg, + struct ubcore_udata *udata); +int ubcore_modify_tp(struct ubcore_device *dev, struct ubcore_tp_node *tp_node, + struct ubcore_tp_attr *tp_attr, struct ubcore_udata udata); +void ubcore_update_all_vlan_netaddr(struct ubcore_device *dev, + enum ubcore_net_addr_op op); + +static inline uint32_t ubcore_get_jetty_hash(struct ubcore_jetty_id *jetty_id) +{ + return jhash(jetty_id, sizeof(struct ubcore_jetty_id), 0); +} + +static inline uint32_t ubcore_get_tseg_hash(struct ubcore_ubva *ubva) +{ + return jhash(ubva, sizeof(struct ubcore_ubva), 0); +} + +static inline uint32_t ubcore_get_eid_hash(union ubcore_eid *eid) +{ + return jhash(eid, sizeof(union ubcore_eid), 0); +} + +static inline uint32_t ubcore_get_vtp_hash(union ubcore_eid *local_eid) +{ + return jhash(local_eid, + sizeof(union ubcore_eid) + sizeof(union ubcore_eid), 0); +} + +static inline uint32_t ubcore_get_rc_vtp_hash(union ubcore_eid *peer_eid) +{ + return jhash(peer_eid, sizeof(union ubcore_eid) + sizeof(uint32_t), 0); +} + +static inline uint32_t ubcore_get_vtpn_hash(struct ubcore_hash_table *ht, + void *key_addr) +{ + return jhash(key_addr, ht->p.key_size, 0); +} + +static inline uint32_t ubcore_get_ex_tp_hash(uint64_t *tp_handle) +{ + return jhash(tp_handle, sizeof(uint64_t), 0); +} + +static inline bool 
ubcore_is_ub_device(struct ubcore_device *dev) +{ + return (dev->transport_type == UBCORE_TRANSPORT_UB); +} + +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_segment.c b/drivers/ub/urma/ubcore/ubcore_segment.c new file mode 100644 index 000000000000..35af2463c51c --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_segment.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore segment + * Author: Qian Guoxin, Ouyang Changchun + * Create: 2022-07-28 + * Note: + * History: 2022-07-28: Yan Fangfang move segment implementation here + */ + +#include "ubcore_connect_bonding.h" +#include "ubcore_log.h" +#include +#include "ubcore_priv.h" +#include "ubcore_hash_table.h" +#include "ubcore_tp.h" +#include "ubcore_tp_table.h" + +struct ubcore_token_id *ubcore_alloc_token_id(struct ubcore_device *dev, + union ubcore_token_id_flag flag, + struct ubcore_udata *udata) +{ + struct ubcore_token_id *token_id; + + if (flag.bs.pa == 1 && udata != NULL) { + ubcore_log_err("invalid parameter of pa.\n"); + return ERR_PTR(-EINVAL); + } + + if (dev == NULL || dev->ops == NULL || + dev->ops->alloc_token_id == NULL || + dev->ops->free_token_id == NULL) { + ubcore_log_err("invalid parameter.\n"); + return ERR_PTR(-EINVAL); + } + + token_id = dev->ops->alloc_token_id(dev, flag, udata); + if (IS_ERR_OR_NULL(token_id)) { + ubcore_log_err("failed to alloc token_id id.\n"); + return UBCORE_CHECK_RETURN_ERR_PTR(token_id, ENOEXEC); + } + token_id->flag = flag; + token_id->ub_dev = dev; + token_id->uctx = ubcore_get_uctx(udata); + atomic_set(&token_id->use_cnt, 0); + return token_id; +} +EXPORT_SYMBOL(ubcore_alloc_token_id); + +int ubcore_free_token_id(struct ubcore_token_id *token_id) +{ + struct ubcore_device *dev; + + if (token_id == NULL || token_id->ub_dev == NULL || + token_id->ub_dev->ops == NULL || + token_id->ub_dev->ops->free_token_id == NULL) { + ubcore_log_err("invalid parameter.\n"); + return -EINVAL; + } + dev = token_id->ub_dev; + + if (atomic_read(&token_id->use_cnt)) { + ubcore_log_err("The token_id is still being used"); + return -EBUSY; + } + return dev->ops->free_token_id(token_id); +} +EXPORT_SYMBOL(ubcore_free_token_id); + +static int ubcore_check_register_seg_para(struct ubcore_device *dev, + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + if (dev == NULL || cfg == NULL || dev->ops == NULL || + dev->ops->register_seg == NULL || + dev->ops->unregister_seg == NULL || + IS_ERR_OR_NULL(dev->eid_table.eid_entries)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + + if (ubcore_is_bonding_dev(dev)) + return 0; + + if (cfg->flag.bs.pa == 1 && udata != NULL) { + ubcore_log_err("invalid parameter of pa.\n"); + return -1; + } + + if (dev->transport_type == UBCORE_TRANSPORT_UB && + ((cfg->flag.bs.token_id_valid == UBCORE_TOKEN_ID_VALID && + cfg->token_id == NULL) || + (cfg->flag.bs.token_id_valid == UBCORE_TOKEN_ID_INVALID && + cfg->token_id != NULL))) { + ubcore_log_err("invalid parameter of token_id.\n"); + return -1; + } 
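+
+	/*
+	 * For UB transport a caller-supplied, valid token_id must also agree
+	 * with the segment flags: its PA attribute has to match the PA flag
+	 * of the segment being registered, which the next check enforces.
+	 */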
+ + if (dev->transport_type == UBCORE_TRANSPORT_UB && + cfg->flag.bs.token_id_valid == UBCORE_TOKEN_ID_VALID && + cfg->token_id->flag.bs.pa != cfg->flag.bs.pa) { + ubcore_log_err("invalid parameter of token_id pa.\n"); + return -1; + } + + if (cfg->eid_index >= dev->eid_table.eid_cnt) { + ubcore_log_warn("eid_index:%u >= eid_table cnt:%u.\n", + cfg->eid_index, dev->eid_table.eid_cnt); + return -1; + } + return 0; +} + +struct ubcore_target_seg *ubcore_register_seg(struct ubcore_device *dev, + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + union ubcore_token_id_flag flag = { 0 }; + bool alloc_token_id = false; + struct ubcore_seg_cfg tmp_cfg; + struct ubcore_target_seg *tseg; + + if (ubcore_check_register_seg_para(dev, cfg, udata) != 0) + return ERR_PTR(-EINVAL); + + if (udata == NULL && + cfg->flag.bs.token_id_valid == UBCORE_TOKEN_ID_INVALID && + dev->transport_type == UBCORE_TRANSPORT_UB) + alloc_token_id = true; + + tmp_cfg = *cfg; + if (alloc_token_id == true) { + flag.bs.pa = cfg->flag.bs.pa; + tmp_cfg.token_id = ubcore_alloc_token_id(dev, flag, NULL); + if (IS_ERR_OR_NULL(tmp_cfg.token_id)) { + ubcore_log_err("alloc token id failed.\n"); + return (void *)tmp_cfg.token_id; + } + } + + tseg = dev->ops->register_seg(dev, &tmp_cfg, udata); + if (IS_ERR_OR_NULL(tseg)) { + ubcore_log_err_rl("UBEP failed to register segment.\n"); + if (alloc_token_id == true) + (void)ubcore_free_token_id(tmp_cfg.token_id); + return UBCORE_CHECK_RETURN_ERR_PTR(tseg, ENOEXEC); + } + + tseg->ub_dev = dev; + tseg->uctx = ubcore_get_uctx(udata); + tseg->seg.len = tmp_cfg.len; + tseg->seg.ubva.va = tmp_cfg.va; + tseg->token_id = tmp_cfg.token_id; + + (void)memcpy(tseg->seg.ubva.eid.raw, + dev->eid_table.eid_entries[cfg->eid_index].eid.raw, + UBCORE_EID_SIZE); + (void)memcpy(&tseg->seg.attr, &cfg->flag, + sizeof(union ubcore_reg_seg_flag)); + tseg->seg.attr.bs.user_token_id = tmp_cfg.flag.bs.token_id_valid; + atomic_set(&tseg->use_cnt, 0); + if (tseg->token_id != NULL) + atomic_inc(&tseg->token_id->use_cnt); + + return tseg; +} +EXPORT_SYMBOL(ubcore_register_seg); + +int ubcore_unregister_seg(struct ubcore_target_seg *tseg) +{ + struct ubcore_token_id *token_id = NULL; + bool free_token_id = false; + struct ubcore_device *dev; + int ret; + + if (tseg == NULL || tseg->ub_dev == NULL || tseg->ub_dev->ops == NULL || + tseg->ub_dev->ops->unregister_seg == NULL) { + ubcore_log_err("invalid parameter.\n"); + return -EINVAL; + } + + dev = tseg->ub_dev; + + if (tseg->token_id != NULL) + atomic_dec(&tseg->token_id->use_cnt); + + if (tseg->seg.attr.bs.user_token_id == UBCORE_TOKEN_ID_INVALID && + dev->transport_type == UBCORE_TRANSPORT_UB && + tseg->token_id != NULL && tseg->uctx == NULL) { + free_token_id = true; + token_id = tseg->token_id; + } + + ret = dev->ops->unregister_seg(tseg); + + if (free_token_id == true && token_id != NULL) + (void)ubcore_free_token_id(token_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_unregister_seg); + +struct ubcore_target_seg *ubcore_import_seg(struct ubcore_device *dev, + struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_target_seg *tseg; + + if (dev == NULL || cfg == NULL || dev->ops == NULL || + dev->ops->import_seg == NULL || dev->ops->unimport_seg == NULL) { + ubcore_log_err("invalid parameter.\n"); + return ERR_PTR(-EINVAL); + } + + if (ubcore_is_bonding_dev(dev)) { + if (ubcore_connect_exchange_udata_when_import_seg(&cfg->seg, + udata) != 0) { + ubcore_log_err( + "failed to exchange udata when import seg\n"); + return 
ERR_PTR(-ENOEXEC); + } + } + + tseg = dev->ops->import_seg(dev, cfg, udata); + if (IS_ERR_OR_NULL(tseg)) { + ubcore_log_err("UBEP failed to import segment with va\n"); + return UBCORE_CHECK_RETURN_ERR_PTR(tseg, ENOEXEC); + } + tseg->ub_dev = dev; + tseg->uctx = ubcore_get_uctx(udata); + tseg->seg = cfg->seg; + atomic_set(&tseg->use_cnt, 0); + + return tseg; +} +EXPORT_SYMBOL(ubcore_import_seg); + +int ubcore_unimport_seg(struct ubcore_target_seg *tseg) +{ + struct ubcore_device *dev; + + if (tseg == NULL || tseg->ub_dev == NULL || tseg->ub_dev->ops == NULL || + tseg->ub_dev->ops->unimport_seg == NULL) { + ubcore_log_err("invalid parameter.\n"); + return -EINVAL; + } + dev = tseg->ub_dev; + + return dev->ops->unimport_seg(tseg); +} +EXPORT_SYMBOL(ubcore_unimport_seg); diff --git a/drivers/ub/urma/ubcore/ubcore_topo_info.c b/drivers/ub/urma/ubcore/ubcore_topo_info.c new file mode 100644 index 000000000000..df076071f5cf --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_topo_info.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore topo info file + * Author: Liu Jiajun + * Create: 2025-07-03 + * Note: + * History: 2025-07-03 Create file + */ + +#include +#include +#include "ubcore_log.h" +#include +#include "ubcore_topo_info.h" + +static struct ubcore_topo_map *g_ubcore_topo_map; + +struct ubcore_topo_map * +ubcore_create_global_topo_map(struct ubcore_topo_info *topo_infos, + uint32_t node_num) +{ + g_ubcore_topo_map = + ubcore_create_topo_map_from_user(topo_infos, node_num); + return g_ubcore_topo_map; +} + +void ubcore_delete_global_topo_map(void) +{ + if (g_ubcore_topo_map == NULL) + return; + ubcore_delete_topo_map(g_ubcore_topo_map); + g_ubcore_topo_map = NULL; +} + +struct ubcore_topo_map *ubcore_get_global_topo_map(void) +{ + return g_ubcore_topo_map; +} + +struct ubcore_topo_map * +ubcore_create_topo_map_from_user(struct ubcore_topo_info *user_topo_infos, + uint32_t node_num) +{ + struct ubcore_topo_map *topo_map = NULL; + int ret = 0; + + if (user_topo_infos == NULL || node_num <= 0 || + node_num > MAX_NODE_NUM) { + ubcore_log_err("Invalid param\n"); + return NULL; + } + topo_map = kzalloc(sizeof(struct ubcore_topo_map), GFP_KERNEL); + if (topo_map == NULL) + return NULL; + ret = copy_from_user(topo_map->topo_infos, + (void __user *)user_topo_infos, + sizeof(struct ubcore_topo_info) * node_num); + if (ret != 0) { + ubcore_log_err("Failed to copy topo infos\n"); + kfree(topo_map); + return NULL; + } + topo_map->node_num = node_num; + return topo_map; +} + +void ubcore_delete_topo_map(struct ubcore_topo_map *topo_map) +{ + if (topo_map == NULL) + return; + kfree(topo_map); +} + +bool is_eid_valid(const char *eid) +{ + int i; + + for (i = 0; i < EID_LEN; i++) { + if (eid[i] != 0) + return true; + } + return false; +} + +bool is_bonding_and_primary_eid_valid(struct ubcore_topo_map *topo_map) +{ + int i, j; + bool has_primary_eid = false; + + for (i = 0; i < topo_map->node_num; i++) { + if 
(!is_eid_valid(topo_map->topo_infos[i].bonding_eid)) + return false; + has_primary_eid = false; + for (j = 0; j < IODIE_NUM; j++) { + if (is_eid_valid(topo_map->topo_infos[i] + .io_die_info[j] + .primary_eid)) + has_primary_eid = true; + } + if (!has_primary_eid) + return false; + } + return true; +} + +static int find_cur_node_index(struct ubcore_topo_map *topo_map, + uint32_t *node_index) +{ + int i; + + for (i = 0; i < topo_map->node_num; i++) { + if (topo_map->topo_infos[i].is_cur_node) { + *node_index = i; + break; + } + } + if (i == topo_map->node_num) { + ubcore_log_err("can't find cur node\n"); + return -1; + } + return 0; +} + +static bool compare_eids(const char *eid1, const char *eid2) +{ + return memcmp(eid1, eid2, EID_LEN) == 0; +} + +static int update_peer_port_eid(struct ubcore_topo_info *new_topo_info, + struct ubcore_topo_info *old_topo_info) +{ + int i, j; + char *new_peer_port_eid; + char *old_peer_port_eid; + + for (i = 0; i < IODIE_NUM; i++) { + for (j = 0; j < MAX_PORT_NUM; j++) { + if (!is_eid_valid( + new_topo_info->io_die_info[i].port_eid[j])) + continue; + + new_peer_port_eid = + new_topo_info->io_die_info[i].peer_port_eid[j]; + old_peer_port_eid = + old_topo_info->io_die_info[i].peer_port_eid[j]; + + if (!is_eid_valid(new_peer_port_eid)) + continue; + if (is_eid_valid(old_peer_port_eid) && + !compare_eids(new_peer_port_eid, + old_peer_port_eid)) { + ubcore_log_err( + "peer port eid is not same, new: " EID_FMT + ", old: " EID_FMT "\n", + EID_RAW_ARGS(new_peer_port_eid), + EID_RAW_ARGS(old_peer_port_eid)); + return -1; + } + (void)memcpy(old_peer_port_eid, new_peer_port_eid, + EID_LEN); + } + } + return 0; +} + +struct ubcore_topo_info * +ubcore_get_cur_topo_info(struct ubcore_topo_map *topo_map) +{ + uint32_t cur_node_index = 0; + + if (find_cur_node_index(topo_map, &cur_node_index) != 0) { + ubcore_log_err("find cur node index failed\n"); + return NULL; + } + return &(topo_map->topo_infos[cur_node_index]); +} + +int ubcore_update_topo_map(struct ubcore_topo_map *new_topo_map, + struct ubcore_topo_map *old_topo_map) +{ + struct ubcore_topo_info *new_cur_node_info; + struct ubcore_topo_info *old_cur_node_info; + uint32_t new_cur_node_index = 0; + uint32_t old_cur_node_index = 0; + + if (new_topo_map == NULL || old_topo_map == NULL) { + ubcore_log_err("Invalid topo map\n"); + return -EINVAL; + } + if (!is_bonding_and_primary_eid_valid(new_topo_map)) { + ubcore_log_err("Invalid primary eid\n"); + return -EINVAL; + } + if (find_cur_node_index(new_topo_map, &new_cur_node_index) != 0) { + ubcore_log_err("find cur node index failed in new topo map\n"); + return -1; + } + new_cur_node_info = &(new_topo_map->topo_infos[new_cur_node_index]); + if (find_cur_node_index(old_topo_map, &old_cur_node_index) != 0) { + ubcore_log_err("find cur node index failed in old topo map\n"); + return -1; + } + old_cur_node_info = &(old_topo_map->topo_infos[old_cur_node_index]); + + if (update_peer_port_eid(new_cur_node_info, old_cur_node_info) != 0) { + ubcore_log_err("update peer port eid failed\n"); + return -1; + } + return 0; +} + +void ubcore_show_topo_map(struct ubcore_topo_map *topo_map) +{ + int i, j, k; + struct ubcore_topo_info *cur_node_info; + + ubcore_log_info( + "========================== topo map start =============================\n"); + for (i = 0; i < topo_map->node_num; i++) { + cur_node_info = topo_map->topo_infos + i; + if (!is_eid_valid(cur_node_info->bonding_eid)) + continue; + + ubcore_log_info( + "===================== node %d start =======================\n", + i); 
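+		/*
+		 * Per-node layout: the bonding EID first, then for each IO
+		 * die its primary EID followed by every local port EID and
+		 * the peer port EID it is wired to.
+		 */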
+ ubcore_log_info("bonding eid: " EID_FMT "\n", + EID_RAW_ARGS(cur_node_info->bonding_eid)); + for (j = 0; j < IODIE_NUM; j++) { + ubcore_log_info( + "**primary eid %d: " EID_FMT "\n", j, + EID_RAW_ARGS(cur_node_info->io_die_info[j] + .primary_eid)); + for (k = 0; k < MAX_PORT_NUM; k++) { + ubcore_log_info( + "****port eid %d: " EID_FMT "\n", k, + EID_RAW_ARGS( + cur_node_info->io_die_info[j] + .port_eid[k])); + ubcore_log_info( + "****peer_port eid %d: " EID_FMT "\n", + k, + EID_RAW_ARGS( + cur_node_info->io_die_info[j] + .peer_port_eid[k])); + } + } + ubcore_log_info( + "===================== node %d end =======================\n", + i); + } + ubcore_log_info( + "========================== topo map end =============================\n"); +} + +int ubcore_get_primary_eid(union ubcore_eid *eid, union ubcore_eid *primary_eid) +{ + int i, j, k; + struct ubcore_topo_info *cur_node_info; + + if (g_ubcore_topo_map == NULL) { + ubcore_log_info( + "ubcore topo map doesn't exist, eid is primary_eid.\n"); + (void)memcpy(primary_eid, eid, EID_LEN); + return 0; + } + + for (i = 0; i < g_ubcore_topo_map->node_num; i++) { + cur_node_info = g_ubcore_topo_map->topo_infos + i; + if (compare_eids(cur_node_info->bonding_eid, + (char *)eid->raw)) { + ubcore_log_err("input eid is bonding eid\n"); + return -1; + } + for (j = 0; j < IODIE_NUM; j++) { + if (compare_eids( + cur_node_info->io_die_info[j].primary_eid, + (char *)eid->raw)) { + (void)memcpy(primary_eid, + cur_node_info->io_die_info[j] + .primary_eid, + EID_LEN); + ubcore_log_info("input eid is primary eid\n"); + return 0; + } + for (k = 0; k < MAX_PORT_NUM; k++) { + if (compare_eids(cur_node_info->io_die_info[j] + .port_eid[k], + (char *)eid->raw)) { + (void)memcpy(primary_eid, + cur_node_info + ->io_die_info[j] + .primary_eid, + EID_LEN); + ubcore_log_info( + "find primary eid by port eid\n"); + return 0; + } + } + } + } + ubcore_log_err("can't find primary eid\n"); + return -1; +} + +static struct ubcore_topo_info * + ubcore_get_topo_info_by_bonding_eid(union ubcore_eid *bonding_eid) +{ + struct ubcore_topo_map *topo_map; + int i; + + topo_map = g_ubcore_topo_map; + for (i = 0; i < topo_map->node_num; i++) { + if (!memcmp(bonding_eid, topo_map->topo_infos[i].bonding_eid, + sizeof(*bonding_eid))) + return &topo_map->topo_infos[i]; + } + + ubcore_log_err( + "Failed to get topo info, bonding_eid: "EID_FMT".\n", + EID_ARGS(*bonding_eid)); + return NULL; +} + +static int ubcore_get_topo_port_eid(union ubcore_eid *src_v_eid, + union ubcore_eid *dst_v_eid, union ubcore_eid *src_p_eid, + union ubcore_eid *dst_p_eid) +{ + struct ubcore_topo_info *src_topo_info = NULL; + struct ubcore_topo_info *dst_topo_info = NULL; + int i, j; + + src_topo_info = + ubcore_get_topo_info_by_bonding_eid(src_v_eid); + if (IS_ERR_OR_NULL(src_topo_info)) { + ubcore_log_err("Failed to get src_topo_info.\n"); + return -1; + } + + dst_topo_info = + ubcore_get_topo_info_by_bonding_eid(dst_v_eid); + if (IS_ERR_OR_NULL(dst_topo_info)) { + ubcore_log_err("Failed to get dst_topo_info.\n"); + return -1; + } + + /* loop up in source topo info */ + for (i = 0; i < MAX_PORT_NUM; i++) { + if (!is_eid_valid(src_topo_info->io_die_info[0].port_eid[i]) || + !is_eid_valid(src_topo_info->io_die_info[0].peer_port_eid[i])) { + continue; + } + for (j = 0; j < MAX_PORT_NUM; j++) { + if (compare_eids(src_topo_info->io_die_info[0].peer_port_eid[i], + dst_topo_info->io_die_info[0].port_eid[j])) { + (void)memcpy(src_p_eid, + src_topo_info->io_die_info[0].port_eid[i], EID_LEN); + (void)memcpy(dst_p_eid, + 
src_topo_info->io_die_info[0].peer_port_eid[i], EID_LEN); + return 0; + } + } + } + + /* loop up in dest topo info */ + for (i = 0; i < MAX_PORT_NUM; i++) { + if (!is_eid_valid(dst_topo_info->io_die_info[0].port_eid[i]) || + !is_eid_valid(dst_topo_info->io_die_info[0].peer_port_eid[i])) { + continue; + } + for (j = 0; j < MAX_PORT_NUM; j++) { + if (compare_eids( + dst_topo_info->io_die_info[0].peer_port_eid[i], + src_topo_info->io_die_info[0].port_eid[j])) { + (void)memcpy(src_p_eid, + dst_topo_info->io_die_info[0].peer_port_eid[i], EID_LEN); + (void)memcpy(dst_p_eid, + dst_topo_info->io_die_info[0].port_eid[i], EID_LEN); + return 0; + } + } + } + + ubcore_log_err( + "Failed to get topo port eid, src_v_eid: "EID_FMT", dst_v_eid: "EID_FMT".\n", + EID_ARGS(*src_v_eid), EID_ARGS(*dst_v_eid)); + return -1; +} + +int ubcore_get_primary_eid_by_bonding_eid(union ubcore_eid *bonding_eid, + union ubcore_eid *primary_eid) +{ + struct ubcore_topo_map *topo_map; + int i; + + topo_map = ubcore_get_global_topo_map(); + if (topo_map == NULL) { + ubcore_log_err("Failed get global topo map"); + return -1; + } + + for (i = 0; i < topo_map->node_num; i++) { + if (!memcmp(bonding_eid, topo_map->topo_infos[i].bonding_eid, + sizeof(*bonding_eid))) { + *primary_eid = *((union ubcore_eid *) + topo_map->topo_infos[i].io_die_info[0].primary_eid); + return 0; + } + } + return -1; +} + +static int ubcore_get_topo_primary_eid(union ubcore_eid *src_v_eid, + union ubcore_eid *dst_v_eid, union ubcore_eid *src_p_eid, + union ubcore_eid *dst_p_eid) +{ + int ret; + + ret = ubcore_get_primary_eid_by_bonding_eid(src_v_eid, src_p_eid); + if (ret != 0) { + ubcore_log_err( + "Failed to get src_p_eid, src_v_eid: "EID_FMT".\n", + EID_ARGS(*src_v_eid)); + return ret; + } + + ret = ubcore_get_primary_eid_by_bonding_eid(dst_v_eid, dst_p_eid); + if (ret != 0) { + ubcore_log_err( + "Failed to get dst_p_eid, dst_v_eid: "EID_FMT".\n", + EID_ARGS(*dst_v_eid)); + return ret; + } + + return 0; +} + +int ubcore_get_topo_eid(uint32_t tp_type, union ubcore_eid *src_v_eid, + union ubcore_eid *dst_v_eid, union ubcore_eid *src_p_eid, + union ubcore_eid *dst_p_eid) +{ + int ret = 0; + + if (src_v_eid == NULL || dst_v_eid == NULL || + src_p_eid == NULL || dst_p_eid == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + if (g_ubcore_topo_map == NULL) { + ubcore_log_err( + "Failed to get p_eid, ubcore topo map doesn't exist.\n"); + return -1; + } + + switch (tp_type) { + case UBCORE_RTP: + case UBCORE_UTP: + ret = ubcore_get_topo_port_eid(src_v_eid, dst_v_eid, + src_p_eid, dst_p_eid); + break; + case UBCORE_CTP: + ret = ubcore_get_topo_primary_eid(src_v_eid, dst_v_eid, + src_p_eid, dst_p_eid); + break; + default: + ubcore_log_err("Invalid tp tpye: %u.\n", tp_type); + return -EINVAL; + } + + return ret; +} +EXPORT_SYMBOL(ubcore_get_topo_eid); diff --git a/drivers/ub/urma/ubcore/ubcore_topo_info.h b/drivers/ub/urma/ubcore/ubcore_topo_info.h new file mode 100644 index 000000000000..83a2a539d7aa --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_topo_info.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore topo info head file + * Author: Liu Jiajun + * Create: 2025-07-03 + * Note: + * History: 2025-07-03 Create file + */ + +#ifndef UBCORE_TOPO_INFO_H +#define UBCORE_TOPO_INFO_H + +#include + +#define EID_LEN (16) +#define MAX_PORT_NUM (9) +#define MAX_NODE_NUM (16) +#define IODIE_NUM (2) + +struct ubcore_iodie_info { + char primary_eid[EID_LEN]; + char port_eid[MAX_PORT_NUM][EID_LEN]; + char peer_port_eid[MAX_PORT_NUM][EID_LEN]; + int socket_id; +}; + +struct ubcore_topo_info { + char bonding_eid[EID_LEN]; + struct ubcore_iodie_info io_die_info[IODIE_NUM]; + bool is_cur_node; +}; + +struct ubcore_topo_map { + struct ubcore_topo_info topo_infos[MAX_NODE_NUM]; + uint32_t node_num; +}; + +struct ubcore_topo_map * +ubcore_create_global_topo_map(struct ubcore_topo_info *topo_infos, + uint32_t node_num); +void ubcore_delete_global_topo_map(void); +struct ubcore_topo_map *ubcore_get_global_topo_map(void); +struct ubcore_topo_map * +ubcore_create_topo_map_from_user(struct ubcore_topo_info *user_topo_infos, + uint32_t node_num); +void ubcore_delete_topo_map(struct ubcore_topo_map *topo_map); +bool is_eid_valid(const char *eid); +bool is_bonding_and_primary_eid_valid(struct ubcore_topo_map *topo_map); +struct ubcore_topo_info * +ubcore_get_cur_topo_info(struct ubcore_topo_map *topo_map); +int ubcore_update_topo_map(struct ubcore_topo_map *new_topo_map, + struct ubcore_topo_map *old_topo_map); +void ubcore_show_topo_map(struct ubcore_topo_map *topo_map); +int ubcore_get_primary_eid(union ubcore_eid *eid, + union ubcore_eid *primary_eid); + +int ubcore_get_primary_eid_by_bonding_eid(union ubcore_eid *bonding_eid, + union ubcore_eid *primary_eid); + +#endif // UBCORE_TOPO_INFO_H diff --git a/drivers/ub/urma/ubcore/ubcore_tp.c b/drivers/ub/urma/ubcore/ubcore_tp.c new file mode 100644 index 000000000000..490a5abb7076 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_tp.c @@ -0,0 +1,1693 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore tp implementation + * Author: Yan Fangfang + * Create: 2022-08-25 + * Note: + * History: 2022-08-25: Create file + */ + +#include +#include +#include +#include +#include +#include +#include "ubcore_log.h" +#include "ubcore_netlink.h" +#include "ubcore_priv.h" +#include +#include "ubcore_tp_table.h" +#include "ubcore_msg.h" +#include "ubcore_vtp.h" +#include +#include "ubcore_uvs.h" +#include "ubcore_tp.h" + +#define UB_PROTOCOL_HEAD_BYTES 313 +#define UB_MTU_BITS_BASE_SHIFT 7 +/* to guarantee all bitmaps filled as 1 */ +#define UBCORE_TP_ATTR_MASK 0xFFFFFFFF +#define UBCORE_MAX_TP_EXT_LEN 2048 + +static inline uint32_t get_udrv_in_len(struct ubcore_udata *udata) +{ + return ((udata == NULL || udata->udrv_data == NULL) ? 
+ 0 : + udata->udrv_data->in_len); +} + +static inline int get_udrv_in_data(uint8_t *dst, uint32_t dst_len, + struct ubcore_udata *udata) +{ + if (get_udrv_in_len(udata) == 0) + return 0; + + if (udata->uctx != NULL) { + if (dst_len < udata->udrv_data->in_len) + return -1; + return (int)copy_from_user( + dst, + (void __user *)(uintptr_t)udata->udrv_data->in_addr, + udata->udrv_data->in_len); + } else { + (void)memcpy(dst, (void *)udata->udrv_data->in_addr, + udata->udrv_data->in_len); + return 0; + } +} + +static inline int ubcore_mtu_enum_to_int(enum ubcore_mtu mtu) +{ + return 1 << ((int)mtu + UB_MTU_BITS_BASE_SHIFT); +} + +enum ubcore_mtu ubcore_get_mtu(int mtu) +{ + int tmp_mtu = mtu - UB_PROTOCOL_HEAD_BYTES; + + if (mtu < 0) + return 0; + + if (tmp_mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_8192)) + return UBCORE_MTU_8192; + if (tmp_mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_4096)) + return UBCORE_MTU_4096; + else if (tmp_mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_2048)) + return UBCORE_MTU_2048; + else if (tmp_mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_1024)) + return UBCORE_MTU_1024; + else if (tmp_mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_512)) + return UBCORE_MTU_512; + else if (tmp_mtu >= ubcore_mtu_enum_to_int(UBCORE_MTU_256)) + return UBCORE_MTU_256; + else + return 0; +} +EXPORT_SYMBOL(ubcore_get_mtu); + +static int ubcore_set_tp_peer_ext(struct ubcore_tp_attr *attr, + uint64_t ext_addr, uint32_t ext_len) +{ + void *peer_ext = NULL; + int ret; + + /* ext is unused */ + if (ext_len == 0 || ext_addr == 0) + return 0; + + if (ext_len > UBCORE_MAX_TP_EXT_LEN) + return -EINVAL; + + /* copy resp ext from req or response */ + peer_ext = kzalloc(ext_len, GFP_KERNEL); + if (peer_ext == NULL) + return -ENOMEM; + + ret = (int)copy_from_user(peer_ext, (void __user *)(uintptr_t)ext_addr, + ext_len); + if (ret != 0) { + kfree(peer_ext); + return -1; + } + attr->peer_ext.addr = (uint64_t)peer_ext; + attr->peer_ext.len = ext_len; + return 0; +} + +static inline void ubcore_unset_tp_peer_ext(struct ubcore_tp_attr *attr) +{ + if (attr->peer_ext.addr != 0) { + kfree((void *)attr->peer_ext.addr); + attr->peer_ext.addr = 0; + attr->peer_ext.len = 0; + } +} + +static void ubcore_get_ta_data_from_ta(const struct ubcore_ta *ta, + enum ubcore_transport_type trans_type, + struct ubcore_ta_data *ta_data) +{ + struct ubcore_jetty *jetty; + struct ubcore_jfs *jfs; + + ta_data->ta_type = ta->type; + switch (ta->type) { + case UBCORE_TA_JFS_TJFR: + jfs = ta->jfs; + if (jfs->jfs_cfg.eid_index >= jfs->ub_dev->eid_table.eid_cnt || + IS_ERR_OR_NULL(jfs->ub_dev->eid_table.eid_entries)) + return; + ta_data->jetty_id.eid = + jfs->ub_dev->eid_table + .eid_entries[jfs->jfs_cfg.eid_index] + .eid; + ta_data->jetty_id.id = jfs->jfs_id.id; + ta_data->tjetty_id = ta->tjetty_id; + break; + case UBCORE_TA_JETTY_TJETTY: + jetty = ta->jetty; + if (jetty->jetty_cfg.eid_index >= + jetty->ub_dev->eid_table.eid_cnt || + IS_ERR_OR_NULL(jetty->ub_dev->eid_table.eid_entries)) + return; + ta_data->jetty_id.eid = + jetty->ub_dev->eid_table + .eid_entries[jetty->jetty_cfg.eid_index] + .eid; + ta_data->jetty_id.id = jetty->jetty_id.id; + ta_data->tjetty_id = ta->tjetty_id; + break; + case UBCORE_TA_NONE: + case UBCORE_TA_VIRT: + default: + return; + } + ta_data->trans_type = trans_type; +} + +static void ubcore_tp_kref_release(struct kref *ref_cnt) +{ + struct ubcore_tp *tp = container_of(ref_cnt, struct ubcore_tp, ref_cnt); + + complete(&tp->comp); +} + +void ubcore_tp_kref_put(struct ubcore_tp *tp) +{ + (void)kref_put(&tp->ref_cnt, 
ubcore_tp_kref_release); +} + +void ubcore_tp_get(void *obj) +{ + struct ubcore_tp *tp = obj; + + kref_get(&tp->ref_cnt); +} + +int ubcore_destroy_tp(struct ubcore_tp *tp) +{ + if (tp == NULL) + return -EINVAL; + if (!ubcore_have_tp_ops(tp->ub_dev)) { + ubcore_log_err("TP ops is NULL"); + return -EINVAL; + } + if (tp->peer_ext.len > 0 && tp->peer_ext.addr != 0) + kfree((void *)tp->peer_ext.addr); + + mutex_destroy(&tp->lock); + return tp->ub_dev->ops->destroy_tp(tp); +} +EXPORT_SYMBOL(ubcore_destroy_tp); + +static void ubcore_set_tp_flag(union ubcore_tp_flag *flag, + struct ubcore_tp_cfg *cfg, + struct ubcore_device *dev) +{ + flag->bs.target = cfg->flag.bs.target; +} + +void ubcore_set_tp_init_cfg(struct ubcore_tp *tp, struct ubcore_tp_cfg *cfg) +{ + ubcore_set_tp_flag(&tp->flag, cfg, tp->ub_dev); + if (tp->ub_dev->transport_type == UBCORE_TRANSPORT_UB && + tp->trans_mode == UBCORE_TP_RC) { + tp->local_jetty = cfg->local_jetty; + tp->peer_jetty = cfg->peer_jetty; + } else { + tp->local_eid = cfg->local_eid; + tp->peer_eid = cfg->peer_eid; + } + + tp->trans_mode = cfg->trans_mode; + tp->tx_psn = 0; + tp->retry_num = cfg->retry_num; + tp->ack_timeout = cfg->ack_timeout; + tp->dscp = cfg->dscp; + tp->oor_cnt = cfg->oor_cnt; +} + +struct ubcore_tp *ubcore_create_tp(struct ubcore_device *dev, + struct ubcore_tp_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_tp *tp = NULL; + + if (!ubcore_have_tp_ops(dev)) { + ubcore_log_err("Invalid parameter"); + return ERR_PTR(-EINVAL); + } + + tp = dev->ops->create_tp(dev, cfg, udata); + if (IS_ERR_OR_NULL(tp)) { + ubcore_log_err("Failed to create tp towards remote eid %pI6c", + &cfg->peer_eid); + if (tp == NULL) + return ERR_PTR(-ENOEXEC); + return tp; + } + /* The driver may return the old tp pointer */ + if (tp->state != UBCORE_TP_STATE_RESET) + return tp; + + tp->ub_dev = dev; + ubcore_set_tp_init_cfg(tp, cfg); + tp->state = UBCORE_TP_STATE_RESET; + ubcore_log_info_rl("tp state:(set to RESET) with tpn %u", tp->tpn); + tp->priv = NULL; + atomic_set(&tp->use_cnt, 0); + mutex_init(&tp->lock); + + return tp; +} + +int ubcore_fill_netaddr_macvlan(struct ubcore_net_addr *netaddr, + struct net_device *netdev, + enum ubcore_net_addr_type type) +{ + netaddr->type = type; + + if (!netdev) + return -EINVAL; + + /* UB does not have a mac address + * to prevent the duplication of the mac address from hanging + */ + if (netdev->type == UBCORE_NETDEV_UB_TYPE) { + ubcore_log_err("Pure ub does not support uboe mac\n"); + return -1; + } + (void)memcpy(netaddr->mac, netdev->dev_addr, netdev->addr_len); + if (is_vlan_dev(netdev)) + netaddr->vlan = vlan_dev_vlan_id(netdev); + else + netaddr->vlan = 0; + + return 0; +} + +/* check if current tp state can be truned into new tp state */ +int ubcore_modify_tp_state_check(struct ubcore_tp *tp, + enum ubcore_tp_state new_state) +{ + int ret = 0; + + switch (tp->state) { + case UBCORE_TP_STATE_RESET: + if (new_state != UBCORE_TP_STATE_RTR) + ret = -1; + break; + case UBCORE_TP_STATE_RTR: + if (new_state != UBCORE_TP_STATE_ERR && + new_state != UBCORE_TP_STATE_RTS) + ret = -1; + break; + case UBCORE_TP_STATE_RTS: + if (new_state != UBCORE_TP_STATE_ERR && + new_state != UBCORE_TP_STATE_SUSPENDED) + ret = -1; + break; + case UBCORE_TP_STATE_SUSPENDED: + if (new_state != UBCORE_TP_STATE_RTS && + new_state != UBCORE_TP_STATE_ERR) + ret = -1; + break; + case UBCORE_TP_STATE_ERR: + /* ERR -> ERR is allowed */ + if (new_state != UBCORE_TP_STATE_ERR && + new_state != UBCORE_TP_STATE_RESET) + ret = -1; + break; + default: + ret 
= -1; + break; + } + + if (ret != 0) { + ubcore_log_err( + "Failed to check modify_tp state: tpn = %u; old_state %u -> new_state %u", + tp->tpn, (uint32_t)tp->state, (uint32_t)new_state); + } else { + ubcore_log_debug( + "modify_tp state check: tpn = %u; old_state %u -> new_state %u", + tp->tpn, (uint32_t)tp->state, (uint32_t)new_state); + } + + return ret; +} + +int ubcore_modify_tp_state(struct ubcore_device *dev, struct ubcore_tp *tp, + enum ubcore_tp_state new_state, + struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask mask) +{ + enum ubcore_tp_state old_state = tp->state; + + mutex_lock(&tp->lock); + old_state = tp->state; + if (ubcore_modify_tp_state_check(tp, new_state) != 0) { + mutex_unlock(&tp->lock); + return -1; + } + + if (tp->state == UBCORE_TP_STATE_ERR && + new_state == UBCORE_TP_STATE_ERR) { + ubcore_log_info("tp is already in ERR state and tpn = %u", + tp->tpn); + mutex_unlock(&tp->lock); + return 0; + } + + if (dev == NULL || dev->ops == NULL || + dev->ops->modify_tp(tp, attr, mask) != 0) { + /* tp->peer_ext.addr will be freed when called ubcore_destroy_tp */ + ubcore_log_err( + "Failed to modify tp to %u from state %u and tpn = %u", + (uint32_t)new_state, (uint32_t)tp->state, tp->tpn); + mutex_unlock(&tp->lock); + return -1; + } + tp->state = new_state; + ubcore_log_info("change tp state:(%u to %u) with tpn %u, peer_tpn %u", + (uint32_t)old_state, (uint32_t)new_state, tp->tpn, + tp->peer_tpn); + mutex_unlock(&tp->lock); + return 0; +} + +static int ubcore_modify_tp_to_rts(const struct ubcore_device *dev, + struct ubcore_tp *tp) +{ + union ubcore_tp_attr_mask mask; + struct ubcore_tp_attr attr; + + mask.value = 0; + mask.bs.state = 1; + attr.state = UBCORE_TP_STATE_RTS; + attr.tx_psn = 0; + + if (dev->ops->modify_tp(tp, &attr, mask) != 0) { + /* tp->peer_ext.addr will be freed when called ubcore_destroy_tp */ + ubcore_log_err("Failed to modify tp"); + return -1; + } + tp->state = UBCORE_TP_STATE_RTS; + ubcore_log_debug("tp state:(RTR to RTS) with tpn %u, peer_tpn %u", + tp->tpn, tp->peer_tpn); + return 0; +} + +#define ubcore_mod_tp_attr_with_mask(tp, attr, field, mask) \ + (tp->field = mask.bs.field ? 
attr->field : tp->field) + +#define ubcore_log_tp_attr(tp, attr, field, mask) \ + do { \ + if (mask.bs.field) { \ + ubcore_log_info("dev: %s, tp:%u, modify " #field \ + " to: %u ", \ + tp->ub_dev->dev_name, tp->tpn, \ + (uint32_t)tp->field); \ + } \ + } while (0) + +void ubcore_modify_tp_attr(struct ubcore_tp *tp, struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask mask) +{ + if (mask.bs.flag) { + tp->flag.bs.oor_en = attr->flag.bs.oor_en; + tp->flag.bs.sr_en = attr->flag.bs.sr_en; + tp->flag.bs.cc_en = attr->flag.bs.cc_en; + tp->flag.bs.cc_alg = attr->flag.bs.cc_alg; + tp->flag.bs.spray_en = attr->flag.bs.spray_en; + tp->flag.bs.clan = attr->flag.bs.clan; + } + + ubcore_mod_tp_attr_with_mask(tp, attr, peer_tpn, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, state, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, tx_psn, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, rx_psn, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, mtu, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, cc_pattern_idx, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, peer_ext, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, oos_cnt, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, local_net_addr_idx, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, peer_net_addr, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, data_udp_start, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, ack_udp_start, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, udp_range, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, hop_limit, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, port_id, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, flow_label, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, mn, mask); + ubcore_mod_tp_attr_with_mask(tp, attr, peer_trans_type, mask); + + // log for attr change + ubcore_log_tp_attr(tp, attr, data_udp_start, mask); +} + +static int ubcore_set_target_peer(struct ubcore_tp *tp, + struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask *mask, + struct ubcore_tp_attr *tp_attr, + struct ubcore_udata udata) +{ + mask->value = UBCORE_TP_ATTR_MASK; + + memset(attr, 0, sizeof(*attr)); + (void)memcpy(attr, tp_attr, sizeof(struct ubcore_tp_attr)); + attr->tx_psn = tp_attr->rx_psn; + attr->state = UBCORE_TP_STATE_RTR; + + if (tp->peer_ext.addr != 0) + return 0; + + return ubcore_set_tp_peer_ext(attr, udata.udrv_data->in_addr, + udata.udrv_data->in_len); +} + +static void ubcore_set_jetty_for_tp_param(struct ubcore_ta *ta, + enum ubcore_transport_mode trans_mode, + struct ubcore_vtp_param *vtp_param) +{ + struct ubcore_jetty *jetty; + struct ubcore_jfs *jfs; + + (void)memset(vtp_param, 0, sizeof(struct ubcore_vtp_param)); + if (ta == NULL) + return; + + switch (ta->type) { + case UBCORE_TA_JFS_TJFR: + jfs = ta->jfs; + if (jfs->jfs_cfg.eid_index >= jfs->ub_dev->eid_table.eid_cnt || + IS_ERR_OR_NULL(jfs->ub_dev->eid_table.eid_entries)) + return; + vtp_param->local_eid = + jfs->ub_dev->eid_table + .eid_entries[jfs->jfs_cfg.eid_index] + .eid; + vtp_param->local_jetty = jfs->jfs_id.id; + vtp_param->eid_index = jfs->jfs_cfg.eid_index; + break; + case UBCORE_TA_JETTY_TJETTY: + jetty = ta->jetty; + if (jetty->jetty_cfg.eid_index >= + jetty->ub_dev->eid_table.eid_cnt || + IS_ERR_OR_NULL(jetty->ub_dev->eid_table.eid_entries)) + return; + vtp_param->local_eid = + jetty->ub_dev->eid_table + .eid_entries[jetty->jetty_cfg.eid_index] + .eid; + vtp_param->local_jetty = jetty->jetty_id.id; + vtp_param->eid_index = jetty->jetty_cfg.eid_index; + break; + case UBCORE_TA_NONE: + case UBCORE_TA_VIRT: + default: + return; + } + 
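+ /* fields common to both the JFS and jetty cases handled above */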
vtp_param->trans_mode = trans_mode; + vtp_param->peer_eid = ta->tjetty_id.eid; + vtp_param->peer_jetty = ta->tjetty_id.id; + vtp_param->ta = *ta; +} + +int ubcore_modify_tp(struct ubcore_device *dev, struct ubcore_tp_node *tp_node, + struct ubcore_tp_attr *tp_attr, struct ubcore_udata udata) +{ + struct ubcore_tp *tp = tp_node->tp; + union ubcore_tp_attr_mask mask; + struct ubcore_tp_attr attr; + int ret = 0; + + mutex_lock(&tp_node->lock); + + switch (tp->state) { + case UBCORE_TP_STATE_RTS: + ubcore_log_info_rl( + "Reuse tp state:(RTS) with tpn %u, peer_tpn %u", + tp->tpn, tp->peer_tpn); + break; + case UBCORE_TP_STATE_RESET: + /* Modify target tp to RTR */ + if (ubcore_set_target_peer(tp, &attr, &mask, tp_attr, udata) != + 0) { + ubcore_log_err("Failed to set target peer"); + ret = -1; + break; + } + if (dev->ops->modify_tp(tp, &attr, mask) != 0) { + ubcore_unset_tp_peer_ext(&attr); + ubcore_log_err("Failed to modify tp"); + ret = -1; + break; + } + ubcore_modify_tp_attr(tp, &attr, mask); + ubcore_log_info_rl( + "tp state:(RESET to RTR) with tpn %u, peer_tpn %u", + tp->tpn, tp->peer_tpn); + break; + case UBCORE_TP_STATE_RTR: + ret = ubcore_modify_tp_to_rts(dev, tp); + ubcore_log_info_rl( + "tp state:(RTR to RTS) with tpn %u, peer_tpn %u", + tp->tpn, tp->peer_tpn); + break; + case UBCORE_TP_STATE_SUSPENDED: + ubcore_log_info_rl("tp state: TP_STATE_SUSPENDED\n"); + fallthrough; + + case UBCORE_TP_STATE_ERR: + ubcore_log_info_rl("tp state: TP_STATE_ERR\n"); + fallthrough; + + default: + ret = -1; + break; + } + + mutex_unlock(&tp_node->lock); + return ret; +} + +static int ubcore_parse_ta(struct ubcore_device *dev, + struct ubcore_ta_data *ta_data, + struct ubcore_tp_meta *meta) +{ + struct ubcore_jetty *jetty; + struct ubcore_jfs *jfs; + + switch (ta_data->ta_type) { + case UBCORE_TA_JFS_TJFR: + jfs = ubcore_find_get_jfs(dev, ta_data->tjetty_id.id); + if (jfs != NULL) { + meta->ht = ubcore_get_tptable(jfs->tptable); + ubcore_put_jfs(jfs); + } + break; + case UBCORE_TA_JETTY_TJETTY: + /* todonext: add kref to jetty, as it may be destroyed any time */ + jetty = ubcore_find_get_jetty(dev, ta_data->tjetty_id.id); + if (jetty != NULL) { + if (jetty->jetty_cfg.trans_mode == UBCORE_TP_RC && + jetty->remote_jetty != NULL && + memcmp(&jetty->remote_jetty->cfg.id, + &ta_data->jetty_id, + sizeof(struct ubcore_jetty_id))) { + ubcore_log_err( + "the same jetty is already bound to another remote jetty.\n"); + ubcore_put_jetty(jetty); + return -1; + } + meta->ht = ubcore_get_tptable(jetty->tptable); + ubcore_put_jetty(jetty); + } + break; + case UBCORE_TA_NONE: + case UBCORE_TA_VIRT: + default: + return -1; + } + ubcore_init_tp_key_jetty_id(&meta->key, &ta_data->jetty_id); + + /* jetty and jfs should be indexed consecutively */ + meta->hash = ubcore_get_jetty_hash(&ta_data->jetty_id); + return 0; +} + +static int ubcore_init_create_tp_req(struct ubcore_device *dev, + struct ubcore_vtp_param *tp_param, + struct ubcore_tp *tp, + struct ubcore_udata *udata, + struct ubcore_create_vtp_req *data) +{ + data->trans_mode = tp_param->trans_mode; + data->local_eid = tp_param->local_eid; + data->peer_eid = tp_param->peer_eid; + data->eid_index = tp_param->eid_index; + data->local_jetty = tp_param->local_jetty; + data->peer_jetty = tp_param->peer_jetty; + (void)strscpy(data->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME - 1); + data->virtualization = dev->attr.virtualization; + + ubcore_get_ta_data_from_ta(&tp_param->ta, dev->transport_type, + &data->ta_data); + data->udrv_in_len = get_udrv_in_len(udata); + data->ext_len 
= tp->tp_ext.len; + + if (get_udrv_in_data(data->udrv_ext, get_udrv_in_len(udata), udata) != + 0) { + ubcore_log_err("Failed to get udrv data"); + return -1; + } + if (tp->tp_ext.len > 0) + (void)memcpy(data->udrv_ext + get_udrv_in_len(udata), + (void *)tp->tp_ext.addr, tp->tp_ext.len); + + return 0; +} + +static int ubcore_send_create_tp_req(struct ubcore_device *dev, + struct ubcore_vtp_param *tp_param, + struct ubcore_tp *tp, + struct ubcore_udata *udata) +{ + struct ubcore_create_vtp_req *data; + struct ubcore_req *req_msg; + uint32_t payload_len; + uint32_t udata_len; + uint32_t tp_len; + + /* dev has been unregistered and the message channel has been down */ + if (ubcore_check_dev_is_exist(dev->dev_name) == false) + return -ENONET; + + tp_len = tp->tp_ext.len; + udata_len = get_udrv_in_len(udata); + if (tp_len + udata_len > UBCORE_MAX_UDRV_EXT_LEN || + tp_len + udata_len < udata_len) { + ubcore_log_err("Invalid len tp_len:%u, udata_len:%u", tp_len, + udata_len); + return -EINVAL; + } + + payload_len = (uint32_t)sizeof(struct ubcore_create_vtp_req); + req_msg = + kcalloc(1, sizeof(struct ubcore_req) + payload_len, GFP_KERNEL); + if (req_msg == NULL) + return -ENOMEM; + + req_msg->opcode = UBCORE_MSG_CREATE_VTP; + req_msg->len = payload_len; + data = (struct ubcore_create_vtp_req *)req_msg->data; + if (ubcore_init_create_tp_req(dev, tp_param, tp, udata, data) != 0) { + kfree(req_msg); + return -ENOEXEC; + } + kfree(req_msg); + return 0; +} + +static int ubcore_send_del_tp_req(struct ubcore_device *dev, + struct ubcore_vtp_param *tp_param) +{ + struct ubcore_create_vtp_req *data; + struct ubcore_req *req_msg; + + /* dev has been unregistered and the message channel has been down */ + if (ubcore_check_dev_is_exist(dev->dev_name) == false) + return -ENONET; + + req_msg = kcalloc(1, + sizeof(struct ubcore_req) + + sizeof(struct ubcore_create_vtp_req), + GFP_KERNEL); + if (req_msg == NULL) + return -ENOMEM; + + req_msg->opcode = UBCORE_MSG_DESTROY_VTP; + req_msg->len = sizeof(struct ubcore_create_vtp_req); + data = (struct ubcore_create_vtp_req *)req_msg->data; + data->trans_mode = tp_param->trans_mode; + data->local_eid = tp_param->local_eid; + data->peer_eid = tp_param->peer_eid; + data->eid_index = tp_param->eid_index; + data->local_jetty = tp_param->local_jetty; + data->peer_jetty = tp_param->peer_jetty; + (void)strscpy(data->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME - 1); + data->virtualization = dev->attr.virtualization; + /* for alpha start */ + ubcore_get_ta_data_from_ta(&tp_param->ta, dev->transport_type, + &data->ta_data); + kfree(req_msg); + return 0; +} + +static struct ubcore_nlmsg * +ubcore_get_query_tp_req(struct ubcore_device *dev, + enum ubcore_transport_mode trans_mode) +{ + uint32_t payload_len = sizeof(struct ubcore_nl_query_tp_req); + struct ubcore_nl_query_tp_req *query; + struct ubcore_nlmsg *req; + + req = kzalloc(sizeof(struct ubcore_nlmsg) + payload_len, GFP_KERNEL); + if (req == NULL) + return NULL; + + req->transport_type = dev->transport_type; + req->msg_type = UBCORE_CMD_QUERY_TP_REQ; + req->payload_len = payload_len; + query = (struct ubcore_nl_query_tp_req *)req->payload; + query->trans_mode = trans_mode; + (void)memcpy(query->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME); + query->ue_idx = dev->attr.ue_idx; + return req; +} + +static int ubcore_query_tp(struct ubcore_device *dev, + enum ubcore_transport_mode trans_mode, + struct ubcore_nl_query_tp_resp *query_tp_resp) +{ + struct ubcore_nlmsg *req_msg, *resp_msg; + struct ubcore_uvs_instance *uvs = NULL; 
+ struct ubcore_nl_query_tp_resp *resp; + int ret = 0; + + req_msg = ubcore_get_query_tp_req(dev, trans_mode); + if (req_msg == NULL) { + ubcore_log_err("Failed to get query tp req"); + return -1; + } + + uvs = ubcore_find_get_uvs_by_ue(dev, dev->attr.ue_idx); + if (uvs == NULL) { + ubcore_log_err("Failed to find uvs for ue %u", + dev->attr.ue_idx); + kfree(req_msg); + return -1; + } + + resp_msg = ubcore_nl_send_wait(dev, req_msg, uvs); + if (resp_msg == NULL) { + ubcore_log_err("Failed to wait query response"); + ubcore_uvs_kref_put(uvs); + kfree(req_msg); + return -1; + } + + resp = (struct ubcore_nl_query_tp_resp *)(void *)resp_msg->payload; + if (resp_msg->msg_type != UBCORE_CMD_QUERY_TP_RESP || resp == NULL || + resp->ret != UBCORE_NL_RESP_SUCCESS) { + ret = -1; + ubcore_log_err( + "Query tp request is rejected with type %d ret %d", + resp_msg->msg_type, (resp == NULL ? 1 : resp->ret)); + } else { + (void)memcpy(query_tp_resp, resp, + sizeof(struct ubcore_nl_query_tp_resp)); + } + + ubcore_uvs_kref_put(uvs); + kfree(resp_msg); + kfree(req_msg); + return ret; +} + +static void +ubcore_set_initiator_tp_cfg(struct ubcore_tp_cfg *cfg, + struct ubcore_vtp_param *tp_param, + struct ubcore_nl_query_tp_resp *query_tp_resp) +{ + cfg->flag.bs.target = 0; + cfg->local_jetty.eid = tp_param->local_eid; + cfg->local_jetty.id = tp_param->local_jetty; + cfg->peer_jetty.eid = tp_param->peer_eid; + cfg->peer_jetty.id = tp_param->peer_jetty; + cfg->trans_mode = tp_param->trans_mode; + cfg->retry_factor = query_tp_resp->retry_factor; + cfg->retry_num = query_tp_resp->retry_num; + cfg->ack_timeout = query_tp_resp->ack_timeout; + cfg->dscp = query_tp_resp->dscp; + cfg->oor_cnt = query_tp_resp->oor_cnt; +} + +static int ubcore_query_initiator_tp_cfg(struct ubcore_tp_cfg *cfg, + struct ubcore_device *dev, + struct ubcore_vtp_param *tp_param) +{ + struct ubcore_nl_query_tp_resp query_tp_resp; + + if (ubcore_query_tp(dev, tp_param->trans_mode, &query_tp_resp) != 0) { + ubcore_log_err("Failed to query tp"); + return -1; + } + ubcore_set_initiator_tp_cfg(cfg, tp_param, &query_tp_resp); + return 0; +} + +/* udata may be empty because the data may come from the user space or kernel space. 
*/ +int ubcore_bind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_tp_advice *advice, struct ubcore_udata *udata) +{ + struct ubcore_vtp_param tp_param = { 0 }; + struct ubcore_tp_cfg tp_cfg = { 0 }; + struct ubcore_tp *new_tp = NULL; + struct ubcore_tp_node *tp_node; + struct ubcore_device *dev; + + if (jetty == NULL || tjetty == NULL || advice == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + dev = jetty->ub_dev; + + ubcore_set_jetty_for_tp_param(&advice->ta, UBCORE_TP_RC, &tp_param); + if (ubcore_query_initiator_tp_cfg(&tp_cfg, dev, &tp_param) != 0) { + ubcore_log_err("Failed to init tp cfg.\n"); + return -1; + } + /* the driver guarantees to return the same tp if we have created it as a target */ + new_tp = ubcore_create_tp(dev, &tp_cfg, udata); + if (IS_ERR_OR_NULL(new_tp)) { + ubcore_log_err("Failed to create tp"); + return PTR_ERR(new_tp); + } + tp_node = ubcore_add_tp_node(advice->meta.ht, advice->meta.hash, + &advice->meta.key, new_tp, &advice->ta); + if (tp_node == NULL) { + (void)ubcore_destroy_tp(new_tp); + ubcore_log_err("Failed to find and add tp\n"); + return -1; + } else if (tp_node != NULL && tp_node->tp != new_tp) { + (void)ubcore_destroy_tp(new_tp); + new_tp = NULL; + } + + mutex_lock(&tjetty->lock); + if (tjetty->tp != NULL) { + mutex_unlock(&tjetty->lock); + ubcore_tpnode_kref_put(tp_node); + ubcore_find_remove_tp(advice->meta.ht, advice->meta.hash, + &advice->meta.key); + ubcore_log_err( + "The same tjetty, different jetty, prevent duplicate bind.\n"); + return -1; + } + + if (ubcore_send_create_tp_req(dev, &tp_param, tp_node->tp, udata) != + 0) { + ubcore_log_err("Failed to send tp req"); + mutex_unlock(&tjetty->lock); + ubcore_tpnode_kref_put(tp_node); + ubcore_find_remove_tp(advice->meta.ht, advice->meta.hash, + &advice->meta.key); + return -1; + } + tjetty->tp = tp_node->tp; + ubcore_tpnode_kref_put(tp_node); + mutex_unlock(&tjetty->lock); + return 0; +} +EXPORT_SYMBOL(ubcore_bind_tp); + +int ubcore_unbind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_tp_advice *advice) +{ + struct ubcore_vtp_param tp_param; + + if (jetty == NULL || tjetty == NULL || advice == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + mutex_lock(&tjetty->lock); + if (tjetty->tp == NULL) { + mutex_unlock(&tjetty->lock); + ubcore_log_warn( + "TP is not found, already removed or under use\n"); + return 0; + } + mutex_unlock(&tjetty->lock); + + ubcore_set_jetty_for_tp_param(&advice->ta, UBCORE_TP_RC, &tp_param); + if (ubcore_send_del_tp_req(jetty->ub_dev, &tp_param) != 0) { + ubcore_log_warn("failed to unbind tp\n"); + /* It does not depend on the success of the peer TP, + * but depends on the success of the local cleanup, + * otherwise the TP remains. + */ + } + ubcore_find_remove_tp(advice->meta.ht, advice->meta.hash, + &advice->meta.key); + + mutex_lock(&tjetty->lock); + tjetty->tp = NULL; + mutex_unlock(&tjetty->lock); + return 0; +} +EXPORT_SYMBOL(ubcore_unbind_tp); + +/* udata may be empty because the data may come from the user space or kernel space. 
*/ +int ubcore_advise_tp(struct ubcore_device *dev, union ubcore_eid *remote_eid, + struct ubcore_tp_advice *advice, + struct ubcore_udata *udata) +{ + struct ubcore_vtp_param tp_param = { 0 }; + struct ubcore_tp_cfg tp_cfg = { 0 }; + struct ubcore_tp_node *tp_node; + struct ubcore_tp *new_tp; + + if (dev == NULL || remote_eid == NULL || advice == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + /* Must call driver->create_tp with udata if we are advising jetty */ + tp_node = ubcore_lookup_tpnode(advice->meta.ht, advice->meta.hash, + &advice->meta.key); + if (tp_node != NULL && tp_node->tp != NULL && + !tp_node->tp->flag.bs.target) { + ubcore_tpnode_kref_put(tp_node); + return 0; + } + + ubcore_set_jetty_for_tp_param(&advice->ta, UBCORE_TP_RM, &tp_param); + if (ubcore_query_initiator_tp_cfg(&tp_cfg, dev, &tp_param) != 0) { + ubcore_log_err("Failed to init tp cfg.\n"); + return -1; + } + /* advise tp requires the user to pass in the pin memory operation + * and cannot be used in the uvs context ioctl to create tp + */ + new_tp = ubcore_create_tp(dev, &tp_cfg, udata); + if (IS_ERR_OR_NULL(new_tp)) { + ubcore_log_err("Failed to create tp"); + return PTR_ERR(new_tp); + } + tp_node = ubcore_add_tp_node(advice->meta.ht, advice->meta.hash, + &advice->meta.key, new_tp, &advice->ta); + if (tp_node == NULL) { + (void)ubcore_destroy_tp(new_tp); + ubcore_log_err("Failed to find and add tp\n"); + return -1; + } else if (tp_node != NULL && tp_node->tp != new_tp) { + (void)ubcore_destroy_tp(new_tp); + new_tp = NULL; + } + + if (ubcore_send_create_tp_req(dev, &tp_param, tp_node->tp, udata) != + 0) { + ubcore_tpnode_kref_put(tp_node); + ubcore_find_remove_tp(advice->meta.ht, advice->meta.hash, + &advice->meta.key); + ubcore_log_err("Failed to send tp req"); + return -1; + } + ubcore_tpnode_kref_put(tp_node); + return 0; +} +EXPORT_SYMBOL(ubcore_advise_tp); + +int ubcore_unadvise_tp(struct ubcore_device *dev, + struct ubcore_tp_advice *advice) +{ + struct ubcore_vtp_param tp_param; + int ret; + + if (dev == NULL || advice == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + ubcore_set_jetty_for_tp_param(&advice->ta, UBCORE_TP_RM, &tp_param); + ret = ubcore_send_del_tp_req(dev, &tp_param); + if (ret != 0) { + ubcore_log_warn("failed to unadvise tp\n"); + /* It does not depend on the success of the peer TP, + * but depends on the success of the local cleanup, + * otherwise the TP remains. 
+ */ + } + ubcore_find_remove_tp(advice->meta.ht, advice->meta.hash, + &advice->meta.key); + return 0; +} +EXPORT_SYMBOL(ubcore_unadvise_tp); + +static void ubcore_get_ta_from_tp(struct ubcore_ta *ta, struct ubcore_tp *tp) +{ + struct ubcore_tp_node *tp_node = (struct ubcore_tp_node *)tp->priv; + + ta->type = UBCORE_TA_NONE; + switch (tp->trans_mode) { + case UBCORE_TP_RC: + case UBCORE_TP_RM: + /* ta is none for UB native device */ + if (tp_node != NULL) + *ta = tp_node->ta; + break; + case UBCORE_TP_UM: + default: + break; + } +} + +static struct ubcore_nlmsg *ubcore_get_restore_tp_req(struct ubcore_tp *tp) +{ + uint32_t payload_len = + (uint32_t)sizeof(struct ubcore_nl_restore_tp_req); + struct ubcore_nl_restore_tp_req *restore; + struct ubcore_ta ta; + struct ubcore_nlmsg *req; + + req = ubcore_alloc_nlmsg(payload_len, &tp->local_eid, &tp->peer_eid); + if (req == NULL) + return NULL; + + req->transport_type = tp->ub_dev->transport_type; + req->msg_type = UBCORE_CMD_RESTORE_TP_REQ; + restore = (struct ubcore_nl_restore_tp_req *)(void *)req->payload; + restore->trans_mode = tp->trans_mode; + restore->tpn = tp->tpn; + restore->peer_tpn = tp->peer_tpn; + restore->rx_psn = get_random_u32(); + + ubcore_get_ta_from_tp(&ta, tp); + ubcore_get_ta_data_from_ta(&ta, tp->ub_dev->transport_type, + &restore->ta); + + return req; +} + +static struct ubcore_nlmsg * +ubcore_get_restore_tp_response(struct ubcore_nlmsg *req, uint32_t rx_psn, + enum ubcore_nl_resp_status status) +{ + struct ubcore_nl_restore_tp_resp *restore_resp; + struct ubcore_nlmsg *resp = NULL; + + resp = ubcore_alloc_nlmsg(sizeof(struct ubcore_nl_restore_tp_resp), + &req->dst_eid, &req->src_eid); + if (resp == NULL) { + ubcore_log_err("Failed to alloc restore tp response"); + return NULL; + } + + resp->msg_type = UBCORE_CMD_RESTORE_TP_RESP; + resp->nlmsg_seq = req->nlmsg_seq; + resp->transport_type = req->transport_type; + restore_resp = (struct ubcore_nl_restore_tp_resp *)resp->payload; + + if (status == UBCORE_NL_RESP_FAIL) { + restore_resp->ret = UBCORE_NL_RESP_FAIL; + return resp; + } + + restore_resp->peer_rx_psn = rx_psn; + return resp; +} + +static int ubcore_restore_tp_to_reset(struct ubcore_device *dev, + struct ubcore_tp *tp) +{ + union ubcore_tp_attr_mask mask; + struct ubcore_tp_attr attr; + + mask.value = 0; + mask.bs.state = 1; + attr.state = UBCORE_TP_STATE_RESET; + ubcore_log_debug("restore tp to reset(mask): state: %u", mask.bs.state); + ubcore_log_debug("restore tp to reset(attr): state: %u", + (uint32_t)attr.state); + + if (ubcore_modify_tp_state(dev, tp, UBCORE_TP_STATE_RESET, &attr, + mask) != 0) + return -1; + + return 0; +} + +static int ubcore_restore_tp_to_rts(struct ubcore_device *dev, + struct ubcore_tp *tp, uint32_t rx_psn, + uint32_t tx_psn) +{ + union ubcore_tp_attr_mask mask; + struct ubcore_tp_attr attr; + + mask.value = 0; + mask.bs.state = 1; + mask.bs.rx_psn = 1; + mask.bs.tx_psn = 1; + + attr.state = UBCORE_TP_STATE_RTS; + attr.rx_psn = rx_psn; + attr.tx_psn = tx_psn; + ubcore_log_info( + "restore tp to rts(mask): state: %u, rx_psn: %u, tx_psn: %u", + mask.bs.state, mask.bs.rx_psn, mask.bs.tx_psn); + ubcore_log_info( + "restore tp to rts(attr): state: %u, rx_psn: %u, tx_psn: %u", + (uint32_t)attr.state, attr.rx_psn, attr.tx_psn); + + if (ubcore_modify_tp_state(dev, tp, UBCORE_TP_STATE_RTS, &attr, mask) != + 0) + return -1; + + tp->rx_psn = rx_psn; + tp->tx_psn = tx_psn; + + return 0; +} + +int ubcore_restore_tp_error_to_rtr(struct ubcore_device *dev, + struct ubcore_tp *tp, uint32_t rx_psn, + 
uint32_t tx_psn, uint16_t data_udp_start, + uint16_t ack_udp_start) +{ + union ubcore_tp_attr_mask mask; + struct ubcore_tp_attr attr; + + /* Tp param problem + * 1. In the driver, tp params are stored in the tp ctx. In ubcore, tp params are split into + * tp cfg and tp attr. tp cfg stores params that are determined when the tp is created and + * are not updated afterwards. tp attr stores params that may be updated after tp creation. + * 2. During tp err restoration, the driver resets the tp ctx when the tp transitions to RESET, + * so it needs to set all tp params again when the tp transitions to RTR. + * 3. Solution negotiated with the driver: + * (1) For params in tp cfg, ubcore can't pass them down to the driver in the current restoration + * procedure. The driver needs to set this part of the params from ubcore_tp *tp. + * (2) For params in tp attr, the driver will set them from tp attr according to the mask, + * so ubcore has to pass down all of them, not only the updated ones. + */ + // set all bits in mask to pass down all params in tp attr + mask.value = UBCORE_TP_ATTR_MASK; + // updated params + attr.state = UBCORE_TP_STATE_RTR; + attr.rx_psn = rx_psn; + attr.tx_psn = tx_psn; + attr.data_udp_start = data_udp_start; + attr.ack_udp_start = ack_udp_start; + // not updated params. Set from ubcore_tp *tp + attr.flag.bs.oor_en = tp->flag.bs.oor_en; + attr.flag.bs.sr_en = tp->flag.bs.sr_en; + attr.flag.bs.cc_en = tp->flag.bs.cc_en; + attr.flag.bs.cc_alg = tp->flag.bs.cc_alg; + attr.flag.bs.spray_en = tp->flag.bs.spray_en; + attr.flag.bs.clan = tp->flag.bs.clan; + attr.peer_tpn = tp->peer_tpn; + attr.mtu = tp->mtu; + attr.cc_pattern_idx = tp->cc_pattern_idx; + attr.peer_ext = tp->peer_ext; + attr.oos_cnt = tp->oos_cnt; + attr.local_net_addr_idx = tp->local_net_addr_idx; + attr.peer_net_addr = tp->peer_net_addr; + attr.udp_range = tp->udp_range; + attr.hop_limit = tp->hop_limit; + attr.flow_label = tp->flow_label; + attr.port_id = tp->port_id; + attr.mn = tp->mn; + attr.peer_trans_type = tp->peer_trans_type; + + ubcore_log_info( + "restore tp to rtr(mask): state: %u, rx_psn: %u, tx_psn: %u, data_udp: %u, ack_udp: %u", + mask.bs.state, mask.bs.rx_psn, mask.bs.tx_psn, + mask.bs.data_udp_start, mask.bs.ack_udp_start); + ubcore_log_info( + "restore tp to rtr(attr): state: %u, rx_psn: %u, tx_psn: %u, data_udp: %u, ack_udp: %u", + (uint32_t)attr.state, attr.rx_psn, attr.tx_psn, + attr.data_udp_start, attr.ack_udp_start); + + if (ubcore_modify_tp_state(dev, tp, UBCORE_TP_STATE_RTR, &attr, mask) != + 0) + return -1; + + tp->rx_psn = rx_psn; + tp->tx_psn = tx_psn; + tp->data_udp_start = data_udp_start; + tp->ack_udp_start = ack_udp_start; + + return 0; +} + +int ubcore_restore_tp_error_to_rts(struct ubcore_device *dev, + struct ubcore_tp *tp) +{ + union ubcore_tp_attr_mask mask; + struct ubcore_tp_attr attr; + + mask.value = 0; + mask.bs.state = 1; + + attr.state = UBCORE_TP_STATE_RTS; + + ubcore_log_info("restore tp to rts, state mask: %u state: %u", + mask.bs.state, (uint32_t)attr.state); + + if (ubcore_modify_tp_state(dev, tp, UBCORE_TP_STATE_RTS, &attr, mask) != + 0) + return -1; + + return 0; +} + +int ubcore_change_tp_to_err(struct ubcore_device *dev, struct ubcore_tp *tp) +{ + union ubcore_tp_attr_mask mask; + struct ubcore_tp_attr attr; + + mask.value = 0; + mask.bs.state = 1; + + attr.state = UBCORE_TP_STATE_ERR; + + if (ubcore_modify_tp_state(dev, tp, UBCORE_TP_STATE_ERR, &attr, mask) != + 0) + return -1; + + return 0; +} + +void ubcore_restore_tp(struct ubcore_device *dev, struct ubcore_tp *tp) +{ + struct ubcore_nlmsg *req_msg, *resp_msg; + struct 
ubcore_uvs_instance *uvs = NULL; + struct ubcore_nl_restore_tp_resp *resp; + struct ubcore_nl_restore_tp_req *req; + + /* Currently, only try to restore tp on the UBCORE_TRANSPORT_HNS_UB device, + * Do not send restore tp req from target to initiator, + * Do not restore UM TP, as it is only visible to the driver + */ + if (!ubcore_have_tp_ops(dev) || tp == NULL || + tp->flag.bs.target || tp->priv == NULL || + tp->trans_mode == UBCORE_TP_UM || + tp->state != UBCORE_TP_STATE_ERR || tp->ub_dev == NULL) + return; + + req_msg = ubcore_get_restore_tp_req(tp); + if (req_msg == NULL) { + ubcore_log_err("Failed to get restore tp req"); + return; + } + + uvs = ubcore_find_get_uvs_by_ue(dev, tp->ue_idx); + if (uvs == NULL) { + ubcore_log_err("Failed to find uvs for ue %u", tp->ue_idx); + kfree(req_msg); + return; + } + + resp_msg = ubcore_nl_send_wait(dev, req_msg, uvs); + if (resp_msg == NULL) { + ubcore_log_err("Failed to wait restore tp response %pI6c", + &tp->peer_eid); + ubcore_uvs_kref_put(uvs); + kfree(req_msg); + return; + } + + req = (struct ubcore_nl_restore_tp_req *)(void *)req_msg->payload; + resp = (struct ubcore_nl_restore_tp_resp *)(void *)resp_msg->payload; + if (resp_msg->msg_type != req_msg->msg_type + 1 || resp == NULL || + resp->ret != UBCORE_NL_RESP_SUCCESS) { + ubcore_log_err( + "Restore tp request is rejected with type %d from uvs %s, id %u, ret %d", + resp_msg->msg_type, uvs->name, uvs->id, + (resp == NULL ? 1 : resp->ret)); + ubcore_uvs_kref_put(uvs); + kfree(resp_msg); + kfree(req_msg); + return; + } + + if (ubcore_restore_tp_to_rts(dev, tp, req->rx_psn, resp->peer_rx_psn) != + 0) + ubcore_log_err("Failed to restore tp with tpn %u", tp->tpn); + + ubcore_uvs_kref_put(uvs); + kfree(req_msg); + kfree(resp_msg); + ubcore_log_info("Restored tp with tpn %u", tp->tpn); +} +EXPORT_SYMBOL(ubcore_restore_tp); + +static struct ubcore_nlmsg * +ubcore_get_tp_flush_done_req(struct ubcore_device *dev, struct ubcore_tp *tp) +{ + uint32_t payload_len = + (uint32_t)sizeof(struct ubcore_tp_flush_done_req); + struct ubcore_tp_flush_done_req *flush_done_req; + struct ubcore_nlmsg *req; + + if (tp->ub_dev == NULL) { + ubcore_log_err("Invalid parameter, tpn: %u", tp->tpn); + return NULL; + } + req = ubcore_alloc_nlmsg(payload_len, &tp->local_eid, &tp->peer_eid); + if (req == NULL) + return NULL; + + req->transport_type = tp->ub_dev->transport_type; + req->msg_type = UBCORE_CMD_TP_FLUSH_DONE_REQ; + flush_done_req = + (struct ubcore_tp_flush_done_req *)(void *)req->payload; + flush_done_req->tpn = tp->tpn; + flush_done_req->data_udp_start = tp->data_udp_start; + flush_done_req->ack_udp_start = tp->ack_udp_start; + flush_done_req->tx_psn = tp->tx_psn; + flush_done_req->peer_tpn = tp->peer_tpn; + flush_done_req->trans_mode = tp->trans_mode; + flush_done_req->sip_idx = tp->local_net_addr_idx; + if (tp->tpg) { + flush_done_req->tpgn = tp->tpg->tpgn; + flush_done_req->sip = tp->tpg->tpg_cfg.local_net_addr; + } + + flush_done_req->local_eid = tp->local_eid; + flush_done_req->peer_eid = tp->peer_eid; + ubcore_log_debug( + "report tp flush done: tx_psn: %u, data_udp: %u, ack_udp: %u", + tp->tx_psn, tp->data_udp_start, tp->ack_udp_start); + if (tp->trans_mode == UBCORE_TP_RC) { + flush_done_req->local_jetty_id = tp->local_jetty.id; + flush_done_req->peer_jetty_id = tp->peer_jetty.id; + } + (void)memcpy(flush_done_req->mue_dev_name, dev->dev_name, + UBCORE_MAX_DEV_NAME); + + return req; +} + +static void ubcore_report_tp_flush_done_to_uvs(struct ubcore_nlmsg *req_msg, + struct ubcore_uvs_instance *uvs) +{ + int 
ret; + + ret = ubcore_nl_send_nowait_without_cb(req_msg, uvs); + if (ret != 0) + ubcore_log_err("Failed to nowait send tp error request to %s\n", + uvs->name); + else + ubcore_log_debug( + "Success to nowait send tp error request to %s\n", + uvs->name); +} + +void ubcore_report_tp_flush_done(struct ubcore_device *dev, + struct ubcore_tp *tp) +{ + struct ubcore_uvs_instance **uvs_list = NULL; + struct ubcore_nlmsg *req_msg; + int count = 0; + int i; + + if (tp->state != UBCORE_TP_STATE_RESET) { + /* udma flushes without changing tp to ERR when qpc err. Thus, ubcore needs to + * change tp to ERR first. + */ + if (ubcore_change_tp_to_err(dev, tp) != 0) { + ubcore_log_err("Failed to change tp: %u to err", + tp->tpn); + return; + } + + /* + * For 1650, modifying TP from ERR -> RESET is not supported. + * The result will not affect TP destroy. + */ + if (ubcore_restore_tp_to_reset(dev, tp) != 0) + ubcore_log_err("Failed to restore tp: %u to reset", + tp->tpn); + } + + req_msg = ubcore_get_tp_flush_done_req(dev, tp); + if (req_msg == NULL) { + ubcore_log_err("Failed to get tp flush done req"); + return; + } + + /* Due to async reason, ue2uvs mapping is possibly already removed at this point. + * In this case, we should try to send flush done request to all UVS to ensure + * flush done count can be decreased correctly in UVS. + */ + uvs_list = ubcore_uvs_list_get_all_alive(&count); + if (uvs_list == NULL || count == 0) { + ubcore_log_err("Failed to find uvs for ue %u", tp->ue_idx); + kfree(req_msg); + return; + } + + for (i = 0; i < count; i++) + ubcore_report_tp_flush_done_to_uvs(req_msg, uvs_list[i]); + + ubcore_uvs_list_put(uvs_list, count); + kfree(req_msg); +} + +static struct ubcore_nlmsg *ubcore_get_tp_suspend_req(struct ubcore_device *dev, + struct ubcore_tp *tp) +{ + uint32_t payload_len = (uint32_t)sizeof(struct ubcore_tp_suspend_req); + struct ubcore_tp_suspend_req *suspend_req; + struct ubcore_nlmsg *req; + + /* tp is checked in outer function */ + if (tp->ub_dev == NULL || tp->tpg == NULL) { + ubcore_log_err("Invalid parameter, tpn: %u.\n", tp->tpn); + return NULL; + } + req = ubcore_alloc_nlmsg(payload_len, &tp->local_eid, &tp->peer_eid); + if (req == NULL) + return NULL; + + req->transport_type = tp->ub_dev->transport_type; + req->msg_type = UBCORE_CMD_TP_SUSPEND_REQ; + suspend_req = (struct ubcore_tp_suspend_req *)(void *)req->payload; + suspend_req->tpgn = tp->tpg->tpgn; + suspend_req->tpn = tp->tpn; + suspend_req->data_udp_start = tp->data_udp_start; + suspend_req->ack_udp_start = tp->ack_udp_start; + suspend_req->sip_idx = tp->local_net_addr_idx; + ubcore_log_info( + "report tp suspend: data_udp_start: %u, ack_udp_start: %u", + tp->data_udp_start, tp->ack_udp_start); + (void)memcpy(suspend_req->mue_dev_name, dev->dev_name, + UBCORE_MAX_DEV_NAME); + + return req; +} + +void ubcore_report_tp_suspend(struct ubcore_device *dev, struct ubcore_tp *tp) +{ + struct ubcore_uvs_instance *uvs = NULL; + struct ubcore_nlmsg *req_msg; + int ret; + + req_msg = ubcore_get_tp_suspend_req(dev, tp); + if (req_msg == NULL) { + ubcore_log_err("Failed to get tp suspend req"); + return; + } + + uvs = ubcore_find_get_uvs_by_ue(dev, tp->ue_idx); + if (uvs == NULL) { + ubcore_log_err("Failed to find uvs for ue %u", tp->ue_idx); + kfree(req_msg); + return; + } + + ret = ubcore_nl_send_nowait_without_cb(req_msg, uvs); + if (ret) + ubcore_log_err("Failed to nowait send tp suspend request"); + else + ubcore_log_info("Success to nowait send tp suspend request"); + + ubcore_uvs_kref_put(uvs); + kfree(req_msg); 
+} + +void ubcore_put_ta_jetty(struct ubcore_ta *ta) +{ + if (ta->type == UBCORE_TA_JFS_TJFR) + ubcore_put_jfs(ta->jfs); + else if (ta->type == UBCORE_TA_JETTY_TJETTY) + ubcore_put_jetty(ta->jetty); +} + +void ubcore_put_target_ta_jetty(struct ubcore_ta *ta) +{ + if (ta->type == UBCORE_TA_JFS_TJFR) + ubcore_put_jfr(ta->jfr); + else if (ta->type == UBCORE_TA_JETTY_TJETTY) + ubcore_put_jetty(ta->jetty); +} + +/* restore target RM tp created by ubcore_advise_target_tp */ +static int +ubcore_restore_advised_target_tp(struct ubcore_device *dev, + struct ubcore_nl_restore_tp_req *restore, + uint32_t *rx_psn) +{ + struct ubcore_tp_meta meta = { 0 }; + struct ubcore_tp_node *tp_node; + struct ubcore_tp *tp; + + if (ubcore_parse_ta(dev, &restore->ta, &meta) != 0) { + ubcore_log_err("Failed to parse ta with type %u", + (uint32_t)restore->ta.ta_type); + return -1; + } else if (meta.ht == NULL) { + ubcore_log_info("tp table is already released"); + return -1; + } + + spin_lock(&meta.ht->lock); + tp_node = + ubcore_hash_table_lookup_nolock(meta.ht, meta.hash, &meta.key); + /* pair with get_tptable in parse_ta */ + ubcore_put_tptable(meta.ht); + if (tp_node == NULL) { + spin_unlock(&meta.ht->lock); + ubcore_log_err("tp is not found%u", restore->peer_tpn); + return -1; + } + + tp = tp_node->tp; + if (ubcore_restore_tp_to_rts(dev, tp, get_random_u32(), + restore->rx_psn) != 0) { + spin_unlock(&meta.ht->lock); + ubcore_log_err("Failed to modify tp to rts %u", + restore->rx_psn); + return -1; + } + *rx_psn = tp->rx_psn; + spin_unlock(&meta.ht->lock); + return 0; +} + +static int +ubcore_restore_bound_target_tp(struct ubcore_device *dev, + struct ubcore_nl_restore_tp_req *restore, + uint32_t *rx_psn) +{ + return ubcore_restore_advised_target_tp(dev, restore, rx_psn); +} + +static int ubcore_handle_restore_tp(struct ubcore_device *dev, + struct ubcore_nl_restore_tp_req *restore, + uint32_t *rx_psn) +{ + if (restore == NULL || + restore->trans_mode == UBCORE_TP_UM || + restore->ta.ta_type == UBCORE_TA_NONE || + restore->ta.ta_type >= UBCORE_TA_VIRT) + return -1; + + if (restore->trans_mode == UBCORE_TP_RM) + return ubcore_restore_advised_target_tp(dev, restore, rx_psn); + else + return ubcore_restore_bound_target_tp(dev, restore, rx_psn); +} + +struct ubcore_nlmsg *ubcore_handle_restore_tp_req(struct ubcore_nlmsg *req) +{ + enum ubcore_nl_resp_status status = UBCORE_NL_RESP_SUCCESS; + struct ubcore_nl_restore_tp_req *restore; + struct ubcore_device *dev; + uint32_t rx_psn = 0; + int ret = 0; + + if (req == NULL || + req->payload_len != sizeof(struct ubcore_nl_restore_tp_req)) { + ubcore_log_err("Invalid restore req"); + return NULL; + } + + restore = (struct ubcore_nl_restore_tp_req *)(void *)req->payload; + dev = ubcore_find_device(&req->dst_eid, req->transport_type); + if (!ubcore_have_tp_ops(dev)) { + if (dev != NULL) + ubcore_put_device(dev); + ubcore_log_err("Failed to find device or device ops invalid"); + return ubcore_get_restore_tp_response(req, rx_psn, + UBCORE_NL_RESP_FAIL); + } + + ret = ubcore_handle_restore_tp(dev, restore, &rx_psn); + if (ret != 0) { + ubcore_log_err( + "Failed to restore target tp towards remote eid %pI6c", + &req->src_eid); + status = UBCORE_NL_RESP_FAIL; + } + + ubcore_put_device(dev); + return ubcore_get_restore_tp_response(req, rx_psn, status); +} +EXPORT_SYMBOL(ubcore_handle_restore_tp_req); + +int ubcore_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *cfg, + uint32_t *tp_cnt, struct ubcore_tp_info *tp_list, + struct ubcore_udata *udata) +{ + int ret; + + 
if (dev == NULL || dev->ops == NULL || dev->ops->get_tp_list == NULL || + cfg == NULL || tp_cnt == NULL || tp_list == NULL || *tp_cnt == 0) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + if (ubcore_check_trans_mode_valid(cfg->trans_mode) != true) { + ubcore_log_err("Invalid parameter, trans_mode: %d.\n", + (int)cfg->trans_mode); + return -EINVAL; + } + + ret = dev->ops->get_tp_list(dev, cfg, tp_cnt, tp_list, udata); + if (ret != 0) + ubcore_log_err("Failed to get tp list, ret: %d.\n", ret); + + return ret; +} +EXPORT_SYMBOL(ubcore_get_tp_list); + +int ubcore_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, + const uint8_t tp_attr_cnt, const uint32_t tp_attr_bitmap, + const struct ubcore_tp_attr_value *tp_attr, + struct ubcore_udata *udata) +{ + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->set_tp_attr == NULL || + tp_attr == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + ret = dev->ops->set_tp_attr(dev, tp_handle, tp_attr_cnt, tp_attr_bitmap, + tp_attr, udata); + if (ret != 0) + ubcore_log_err("Failed to set tp attributes, ret: %d.\n", + ret); + + return ret; +} +EXPORT_SYMBOL(ubcore_set_tp_attr); + +int ubcore_get_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, + uint8_t *tp_attr_cnt, uint32_t *tp_attr_bitmap, + struct ubcore_tp_attr_value *tp_attr, + struct ubcore_udata *udata) +{ + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->get_tp_attr == NULL || + tp_attr_cnt == NULL || tp_attr_bitmap == NULL || tp_attr == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + ret = dev->ops->get_tp_attr(dev, tp_handle, tp_attr_cnt, tp_attr_bitmap, + tp_attr, udata); + if (ret != 0) + ubcore_log_err("Failed to get tp attributes, ret: %d.\n", + ret); + + return ret; +} +EXPORT_SYMBOL(ubcore_get_tp_attr); diff --git a/drivers/ub/urma/ubcore/ubcore_tp.h b/drivers/ub/urma/ubcore/ubcore_tp.h new file mode 100644 index 000000000000..d86d09f4ef12 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_tp.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore tp header + * Author: Yan Fangfang + * Create: 2022-09-08 + * Note: + * History: 2022-09-08: Create file + */ + +#ifndef UBCORE_TP_H +#define UBCORE_TP_H + +#include +#include "ubcore_tp_table.h" +#include "ubcore_netlink.h" + +struct ubcore_tp_meta { + struct ubcore_hash_table *ht; + uint32_t hash; + struct ubcore_tp_key key; +}; + +struct ubcore_tp_advice { + struct ubcore_ta ta; + struct ubcore_tp_meta meta; +}; + +static inline bool ubcore_have_ops(struct ubcore_device *dev) +{ + return (dev != NULL && dev->ops != NULL); +} + +static inline bool ubcore_have_tp_ops(struct ubcore_device *dev) +{ + return (dev != NULL && dev->ops != NULL && + dev->ops->create_tp != NULL && dev->ops->modify_tp != NULL); +} + +static inline bool ubcore_have_tp_ctrlplane_ops(struct ubcore_device *dev) +{ + return (dev != NULL && dev->ops != NULL && + dev->ops->get_tp_list != NULL && dev->ops->active_tp != NULL); +} + +/* alpha */ +int ubcore_advise_tp(struct ubcore_device *dev, union ubcore_eid *remote_eid, + struct ubcore_tp_advice *advice, + struct ubcore_udata *udata); +int ubcore_unadvise_tp(struct ubcore_device *dev, + struct ubcore_tp_advice *advice); + +struct ubcore_nlmsg *ubcore_handle_restore_tp_req(struct ubcore_nlmsg *req); + +/* bind tp APIs */ +int ubcore_bind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_tp_advice *advice, struct ubcore_udata *udata); +int ubcore_unbind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_tp_advice *advice); + +/* Called when clear tp table */ +int ubcore_destroy_tp(struct ubcore_tp *tp); + +/* restore tp from error state */ +void ubcore_restore_tp(struct ubcore_device *dev, struct ubcore_tp *tp); +int ubcore_restore_tp_error_to_rtr(struct ubcore_device *dev, + struct ubcore_tp *tp, uint32_t rx_psn, + uint32_t tx_psn, uint16_t data_udp_start, + uint16_t ack_udp_start); +int ubcore_restore_tp_error_to_rts(struct ubcore_device *dev, + struct ubcore_tp *tp); +int ubcore_change_tp_to_err(struct ubcore_device *dev, struct ubcore_tp *tp); + +void ubcore_report_tp_suspend(struct ubcore_device *dev, struct ubcore_tp *tp); +void ubcore_report_tp_flush_done(struct ubcore_device *dev, + struct ubcore_tp *tp); + +void ubcore_modify_tp_attr(struct ubcore_tp *tp, struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask mask); +int ubcore_modify_tp_state_check(struct ubcore_tp *tp, + enum ubcore_tp_state new_state); +void ubcore_tp_get(void *obj); +void ubcore_tp_kref_put(struct ubcore_tp *tp); +void ubcore_put_ta_jetty(struct ubcore_ta *ta); +void ubcore_put_target_ta_jetty(struct ubcore_ta *ta); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_tp_table.c b/drivers/ub/urma/ubcore/ubcore_tp_table.c new file mode 100644 index 000000000000..2621c0909ba1 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_tp_table.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore tp table implementation + * Author: Yan Fangfang + * Create: 2023-02-09 + * Note: + * History: 2023-02-09: Create file + */ + +#include +#include "ubcore_log.h" +#include "ubcore_priv.h" +#include "ubcore_tp.h" +#include "ubcore_tp_table.h" + +void ubcore_init_tp_key_jetty_id(struct ubcore_tp_key *key, + const struct ubcore_jetty_id *jetty_id) +{ + memset(key, 0, sizeof(struct ubcore_tp_key)); + key->key_type = UBCORE_TP_KEY_JETTY_ID; + key->jetty_id = *jetty_id; +} + +void ubcore_remove_tp_node(struct ubcore_hash_table *ht, + struct ubcore_tp_node *tp_node) +{ + if (tp_node == NULL) + return; + + ubcore_hash_table_remove(ht, &tp_node->hnode); + kfree(tp_node); +} + +static void ubcore_tpnode_kref_release(struct kref *ref_cnt) +{ + struct ubcore_tp_node *tp_node = + container_of(ref_cnt, struct ubcore_tp_node, ref_cnt); + + complete(&tp_node->comp); +} + +void ubcore_tpnode_kref_put(struct ubcore_tp_node *tp_node) +{ + if (tp_node == NULL) + return; + + (void)kref_put(&tp_node->ref_cnt, ubcore_tpnode_kref_release); +} + +/* Find and remove the tp from table only if it is unreferenced */ +void ubcore_find_remove_tp(struct ubcore_hash_table *ht, uint32_t hash, + const struct ubcore_tp_key *key) +{ + struct ubcore_tp_node *tp_node; + struct ubcore_tp *tp = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return; + } + tp_node = ubcore_hash_table_lookup_nolock(ht, hash, key); + if (tp_node == NULL) { + spin_unlock(&ht->lock); + return; + } + hlist_del_init(&tp_node->hnode); + spin_unlock(&ht->lock); + + ubcore_tpnode_kref_put(tp_node); + wait_for_completion(&tp_node->comp); + + tp = tp_node->tp; + mutex_destroy(&tp_node->lock); + kfree(tp_node); + (void)ubcore_destroy_tp(tp); +} + +struct ubcore_hash_table *ubcore_create_tptable(void) +{ + struct ubcore_ht_param p = { + .size = UBCORE_TP_TABLE_SIZE, + .node_offset = offsetof(struct ubcore_tp_node, hnode), + .key_offset = offsetof(struct ubcore_tp_node, key), + .key_size = (uint32_t)sizeof(struct ubcore_tp_key), + .cmp_f = NULL, + .free_f = NULL + }; + struct ubcore_hash_table *htable; + + htable = kcalloc(1, sizeof(struct ubcore_hash_table), GFP_KERNEL); + if (htable == NULL) + return NULL; + + if (ubcore_hash_table_alloc(htable, &p) != 0) { + kfree(htable); + return NULL; + } + return htable; +} + +static void ubcore_free_tp_node(void *obj) +{ + struct ubcore_tp_node *tp_node = (struct ubcore_tp_node *)obj; + + ubcore_tpnode_kref_put(tp_node); + wait_for_completion(&tp_node->comp); + + (void)ubcore_destroy_tp(tp_node->tp); + kfree(tp_node); +} + +static void ubcore_tptable_release(struct kref *kref) +{ + struct ubcore_hash_table *ht = + container_of(kref, struct ubcore_hash_table, kref); + + kfree(ht); +} + +void ubcore_destroy_tptable(struct ubcore_hash_table **pp_ht) +{ + struct ubcore_hash_table *ht; + + if (pp_ht == NULL || *pp_ht == NULL) + return; + + ht = *pp_ht; + *pp_ht = NULL; + ubcore_hash_table_free_with_cb(ht, ubcore_free_tp_node); + /* pair with kref_init */ + (void)kref_put(&ht->kref, ubcore_tptable_release); +} + +struct ubcore_hash_table *ubcore_get_tptable(struct ubcore_hash_table *ht) +{ + if (ht == NULL) + return NULL; + + kref_get(&ht->kref); + return ht; +} + +void ubcore_put_tptable(struct ubcore_hash_table *ht) +{ + if (ht == NULL) + return; + + (void)kref_put(&ht->kref, ubcore_tptable_release); +} + +struct ubcore_tp_node *ubcore_add_tp_node(struct ubcore_hash_table *ht, + uint32_t hash, + const struct ubcore_tp_key *key, + struct ubcore_tp *tp, + struct 
ubcore_ta *ta) +{ + struct ubcore_tp_node *new_tp_node; + struct ubcore_tp_node *tp_node; + + new_tp_node = kzalloc(sizeof(struct ubcore_tp_node), GFP_KERNEL); + if (new_tp_node == NULL) + return NULL; + + new_tp_node->key = *key; + new_tp_node->tp = tp; + new_tp_node->ta = *ta; + mutex_init(&new_tp_node->lock); + kref_init(&new_tp_node->ref_cnt); + init_completion(&new_tp_node->comp); + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + mutex_destroy(&new_tp_node->lock); + kfree(new_tp_node); + return NULL; + } + tp_node = ubcore_hash_table_lookup_nolock(ht, hash, key); + if (tp_node != NULL) { + kref_get(&tp_node->ref_cnt); + spin_unlock(&ht->lock); + mutex_destroy(&new_tp_node->lock); + kfree(new_tp_node); + return tp_node; + } + + ubcore_hash_table_add_nolock(ht, &new_tp_node->hnode, hash); + /* set private data for tp restore */ + tp->priv = new_tp_node; + kref_get(&new_tp_node->ref_cnt); + spin_unlock(&ht->lock); + return new_tp_node; +} + +struct ubcore_tp_node *ubcore_lookup_tpnode(struct ubcore_hash_table *ht, + uint32_t hash, + const struct ubcore_tp_key *key) +{ + struct ubcore_tp_node *tp_node = NULL; + + spin_lock(&ht->lock); + tp_node = ubcore_hash_table_lookup_nolock(ht, hash, key); + if (tp_node != NULL) + kref_get(&tp_node->ref_cnt); + spin_unlock(&ht->lock); + return tp_node; +} diff --git a/drivers/ub/urma/ubcore/ubcore_tp_table.h b/drivers/ub/urma/ubcore/ubcore_tp_table.h new file mode 100644 index 000000000000..c9b02a8a1d51 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_tp_table.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore tp table header + * Author: Yan Fangfang + * Create: 2023-02-09 + * Note: + * History: 2023-02-09: Create file + */ + +#ifndef UBCORE_TP_TABLE_H +#define UBCORE_TP_TABLE_H + +#include "ubcore_hash_table.h" +#include "ubcore_netlink.h" + +/* Only jetty with RC mode needs tp_table currently */ +#define UBCORE_TP_TABLE_SIZE 16 +enum ubcore_tp_key_type { UBCORE_TP_KEY_JETTY_ID, UBCORE_TP_KEY_TPN }; + +struct ubcore_tp_key { + enum ubcore_tp_key_type key_type; + union { + struct ubcore_jetty_id + jetty_id; /* for initiator tp towards target jfr or jetty */ + uint32_t tpn; /* for target tp */ + }; +} __packed; + +struct ubcore_tp_node { + struct ubcore_tp_key key; + struct ubcore_tp *tp; + struct ubcore_ta ta; + struct hlist_node hnode; + struct mutex lock; + struct kref ref_cnt; + struct completion comp; +}; + +void ubcore_init_tp_key_jetty_id(struct ubcore_tp_key *key, + const struct ubcore_jetty_id *jetty_id); + +/* Return old tp node if key already exists */ +struct ubcore_tp_node *ubcore_add_tp_node(struct ubcore_hash_table *ht, + uint32_t hash, + const struct ubcore_tp_key *key, + struct ubcore_tp *tp, + struct ubcore_ta *ta); +void ubcore_remove_tp_node(struct ubcore_hash_table *ht, + struct ubcore_tp_node *tp_node); +/* Find and remove the tp from table only if it is unreferenced */ +void ubcore_find_remove_tp(struct ubcore_hash_table *ht, uint32_t hash, + const struct ubcore_tp_key *key); + +struct ubcore_tp_node *ubcore_lookup_tpnode(struct ubcore_hash_table *ht, + uint32_t hash, + const struct ubcore_tp_key *key); +void ubcore_tpnode_kref_put(struct ubcore_tp_node *tp_node); + +/* TP table ops for devices that do not natively support RM */ +struct ubcore_hash_table *ubcore_create_tptable(void); +void ubcore_destroy_tptable(struct ubcore_hash_table **pp_ht); +struct ubcore_hash_table *ubcore_get_tptable(struct ubcore_hash_table *ht); +void ubcore_put_tptable(struct ubcore_hash_table *ht); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_tpg.c b/drivers/ub/urma/ubcore/ubcore_tpg.c new file mode 100644 index 000000000000..0451ee7b7a6c --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_tpg.c @@ -0,0 +1,407 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore tpg implementation + * Author: Yan Fangfang + * Create: 2023-07-17 + * Note: + * History: 2023-07-17: Create file + */ + +#include +#include "ubcore_log.h" +#include "ubcore_hash_table.h" +#include "ubcore_tp.h" +#include "ubcore_tpg.h" + +struct ubcore_tpg *ubcore_create_tpg(struct ubcore_device *dev, + struct ubcore_tpg_cfg *cfg) +{ + struct ubcore_tpg *tpg; + uint32_t i; + + if (dev->ops == NULL || dev->ops->create_tpg == NULL) + return ERR_PTR(-EINVAL); + + tpg = dev->ops->create_tpg(dev, cfg, NULL); + if (IS_ERR_OR_NULL(tpg)) { + ubcore_log_err("Failed to create tpg"); + if (tpg == NULL) + return ERR_PTR(-ENOEXEC); + return tpg; + } + tpg->ub_dev = dev; + tpg->tpg_cfg = *cfg; + for (i = 0; i < cfg->tp_cnt; i++) + tpg->tp_list[i] = NULL; + + kref_init(&tpg->ref_cnt); + mutex_init(&tpg->mutex); + + return tpg; +} + +void ubcore_tpg_get(void *obj) +{ + struct ubcore_tpg *tpg = obj; + + if (!tpg) + return; + ubcore_log_debug("get tpg, tpgn:%u, in dev:%s, refcnt to: %u", + tpg->tpgn, tpg->ub_dev->dev_name, + kref_read(&tpg->ref_cnt) + 1); + kref_get(&tpg->ref_cnt); +} + +static void ubcore_destroy_tpg(struct ubcore_tpg *tpg) +{ + struct ubcore_device *dev = tpg->ub_dev; + int ret; + + if (dev->ops == NULL || dev->ops->destroy_tpg == NULL) + return; + + ret = dev->ops->destroy_tpg(tpg); + if (ret != 0) + ubcore_log_err("destroy tpg err:%d", ret); +} + +static void ubcore_tpg_kref_release(struct kref *ref_cnt) +{ + struct ubcore_tpg *tpg = + container_of(ref_cnt, struct ubcore_tpg, ref_cnt); + + ubcore_destroy_tpg(tpg); +} + +void ubcore_tpg_kref_put(struct ubcore_tpg *tpg) +{ + if (!tpg) + return; + + ubcore_log_debug("put tpg, tpgn:%u, in dev:%s, refcnt to: %u", + tpg->tpgn, tpg->ub_dev->dev_name, + kref_read(&tpg->ref_cnt) - 1); + (void)kref_put(&tpg->ref_cnt, ubcore_tpg_kref_release); +} + +void ubcore_tpg_kref_get(struct ubcore_tpg *tpg) +{ + if (!tpg) + return; + kref_get(&tpg->ref_cnt); +} + +struct ubcore_tpg *ubcore_find_get_tpg(struct ubcore_device *dev, uint32_t tpgn) +{ + return ubcore_hash_table_lookup_get(&dev->ht[UBCORE_HT_TPG], tpgn, + &tpgn); +} + +int ubcore_find_remove_tpg(struct ubcore_device *dev, uint32_t tpgn) +{ + struct ubcore_tpg *tpg; + + spin_lock(&dev->ht[UBCORE_HT_TPG].lock); + if (dev->ht[UBCORE_HT_TPG].head == NULL) { + spin_unlock(&dev->ht[UBCORE_HT_TPG].lock); + return -1; + } + tpg = ubcore_hash_table_lookup_nolock(&dev->ht[UBCORE_HT_TPG], tpgn, + &tpgn); + if (tpg == NULL) { + spin_unlock(&dev->ht[UBCORE_HT_TPG].lock); + return -1; + } + ubcore_hash_table_remove_nolock(&dev->ht[UBCORE_HT_TPG], &tpg->hnode); + ubcore_tpg_kref_put(tpg); + spin_unlock(&dev->ht[UBCORE_HT_TPG].lock); + return 0; +} + +int ubcore_add_tp(struct ubcore_device *dev, struct ubcore_tp *tp) +{ + return ubcore_hash_table_find_add(&dev->ht[UBCORE_HT_TP], &tp->hnode, + tp->tpn); +} + +struct ubcore_tp *ubcore_find_get_tp(struct ubcore_device *dev, uint32_t tpn) +{ + return ubcore_hash_table_lookup_get(&dev->ht[UBCORE_HT_TP], tpn, &tpn); +} + +struct ubcore_tp *ubcore_find_remove_tp_node(struct ubcore_device *dev, + uint32_t tpn) +{ + struct ubcore_tp *tp; + + spin_lock(&dev->ht[UBCORE_HT_TP].lock); + if (dev->ht[UBCORE_HT_TP].head == NULL) { + spin_unlock(&dev->ht[UBCORE_HT_TP].lock); + return NULL; + } + tp = ubcore_hash_table_lookup_nolock(&dev->ht[UBCORE_HT_TP], tpn, &tpn); + if (tp == NULL) { + spin_unlock(&dev->ht[UBCORE_HT_TP].lock); + return NULL; + } + + if (atomic_dec_return(&tp->use_cnt) > 0) { + spin_unlock(&dev->ht[UBCORE_HT_TP].lock); + 
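+ /* tp is still referenced elsewhere (use_cnt has not dropped to zero); keep it in the table */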
ubcore_log_warn("Failed to remove tp:%u and use cnt:%u", + tp->tpn, (uint32_t)atomic_read(&tp->use_cnt)); + return NULL; + } + ubcore_hash_table_remove_nolock(&dev->ht[UBCORE_HT_TP], &tp->hnode); + spin_unlock(&dev->ht[UBCORE_HT_TP].lock); + + return tp; +} + +static void ubcore_set_tp_init_flag(union ubcore_tp_flag *flag, + union ubcore_tp_cfg_flag in) +{ + flag->bs.target = in.bs.target; + flag->bs.loopback = in.bs.loopback; + flag->bs.ack_resp = in.bs.ack_resp; + flag->bs.bonding = in.bs.bonding; + flag->bs.dca_enable = in.bs.dca_enable; +} + +/* todonext: merge with the function in tp.c */ +static void ubcore_store_tp_init_cfg(struct ubcore_tpg *tpg, + struct ubcore_tp *tp, + struct ubcore_device *dev, + const struct ubcore_tp_cfg *cfg) +{ + ubcore_set_tp_init_flag(&tp->flag, cfg->flag); + tp->local_jetty = cfg->local_jetty; + tp->peer_jetty = cfg->peer_jetty; + tp->trans_mode = cfg->trans_mode; + tp->retry_num = cfg->retry_num; + tp->ack_timeout = cfg->ack_timeout; + tp->retry_factor = cfg->retry_factor; + tp->dscp = cfg->dscp; + tp->oor_cnt = cfg->oor_cnt; + + tp->ub_dev = dev; + tp->state = UBCORE_TP_STATE_RESET; + tp->tpg = tpg; + tp->priv = NULL; + atomic_set(&tp->use_cnt, 1); + tp->ue_idx = cfg->ue_idx; +} + +int ubcore_create_multi_tp(struct ubcore_device *dev, struct ubcore_tpg *tpg, + struct ubcore_tp_cfg *cfg) +{ + struct ubcore_tp *tp[UBCORE_MAX_TP_CNT_IN_GRP] = { 0 }; + uint32_t tp_cnt = tpg->tpg_cfg.tp_cnt; + int created_tp_cnt; + uint32_t i; + uint32_t j; + int ret; + + if (dev->ops == NULL || dev->ops->create_multi_tp == NULL || + dev->ops->destroy_multi_tp == NULL || + tp_cnt > UBCORE_MAX_TP_CNT_IN_GRP) + return -EINVAL; + + created_tp_cnt = dev->ops->create_multi_tp(dev, tp_cnt, cfg, NULL, tp); + if (created_tp_cnt != (int)tp_cnt) { + if (created_tp_cnt > 0) + (void)dev->ops->destroy_multi_tp(created_tp_cnt, tp); + ubcore_log_err("Failed to create multi tp"); + return -ENOSPC; + } + + /* add tp to tpg */ + for (i = 0; i < tp_cnt; i++) { + struct ubcore_tp *single_tp = tp[i]; + + if (single_tp == NULL) { + ubcore_log_warn("create multi tp, buf tp is null_ptr"); + continue; + } + kref_init(&single_tp->ref_cnt); + init_completion(&single_tp->comp); + /* tpg pointer is filled in each tp, + * so kref of tpg should be increased for each tp + */ + ubcore_tpg_kref_get(tpg); + ubcore_store_tp_init_cfg(tpg, single_tp, dev, &cfg[i]); + mutex_init(&single_tp->lock); + tpg->tp_list[i] = single_tp; + + ret = ubcore_add_tp(dev, single_tp); + if (ret != 0) { + ubcore_log_err( + "Failed to add tp:%u to the tp table and tpg:%u, dev:%s", + single_tp->tpn, tpg->tpgn, dev->dev_name); + for (j = 0; j < i; j++) { + (void)ubcore_find_remove_tp_node(dev, + tp[j]->tpn); + /* tp->tpg should not be NULL before udma destroy tp */ + ubcore_tpg_kref_put(tpg); + } + + (void)dev->ops->destroy_multi_tp(created_tp_cnt, tp); + ubcore_log_err("Failed to create multi tp"); + return -EPERM; + } + ubcore_log_debug( + "success to add tp:%u to the tp table and tpg:%u, dev:%s", + single_tp->tpn, tpg->tpgn, dev->dev_name); + } + + return 0; +} + +int ubcore_destroy_multi_tp_from_index(struct ubcore_device *dev, + struct ubcore_tpg *tpg, + uint32_t from_index) +{ + struct ubcore_tp *tp[UBCORE_MAX_TP_CNT_IN_GRP]; + struct ubcore_tp *single_tp; + uint32_t tp_cnt = 0; + int ret; + int i; + + if (dev->ops == NULL || dev->ops->destroy_multi_tp == NULL) + return 0; + + mutex_lock(&tpg->mutex); + if (from_index >= tpg->tpg_cfg.tp_cnt) { + mutex_unlock(&tpg->mutex); + return 0; + } + + tp_cnt = tpg->tpg_cfg.tp_cnt - 
from_index; + for (i = 0; i < (int)tp_cnt; i++) { + tp[i] = tpg->tp_list[i + (int)from_index]; + tpg->tp_list[i + (int)from_index] = NULL; + ubcore_log_debug("tp set null %d", i + (int)from_index); + } + tpg->tpg_cfg.tp_cnt = from_index; + mutex_unlock(&tpg->mutex); + + for (i = 0; i < (int)tp_cnt; i++) { + ubcore_tp_kref_put(tp[i]); + wait_for_completion(&tp[i]->comp); + mutex_destroy(&tp[i]->lock); + + single_tp = ubcore_find_remove_tp_node(dev, tp[i]->tpn); + if (single_tp == NULL) + ubcore_log_err( + "failed to find tp with tpn %u and tpgn %u, dev:%s", + tp[i]->tpn, tp[i]->tpg->tpgn, dev->dev_name); + /* tp->tpg should not be NULL before udma destroy tp */ + ubcore_tpg_kref_put(tpg); + } + + /* todonext: modify to error, and reset first */ + ret = dev->ops->destroy_multi_tp(tp_cnt, tp); + if (ret != (int)tp_cnt) { + ubcore_log_err("Failed to destroy multi tp_cnt:%u, ret: %d", + tp_cnt, ret); + return -EINVAL; + } + + return 0; +} + +int ubcore_destroy_multi_tp(struct ubcore_device *dev, struct ubcore_tpg *tpg) +{ + return ubcore_destroy_multi_tp_from_index(dev, tpg, 0); +} + +uint32_t ubcore_modify_tp_in_tpg(struct ubcore_device *dev, + struct ubcore_tpg *tpg, + struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask *mask, + struct ubcore_tp **failed_tp) +{ + struct ubcore_tp *tp_list[UBCORE_MAX_TP_CNT_IN_GRP] = {}; + uint32_t tp_cnt = 0; + int ret; + int i; + + if (dev->ops == NULL || dev->ops->modify_multi_tp == NULL) + return 0; + + mutex_lock(&tpg->mutex); + tp_cnt = tpg->tpg_cfg.tp_cnt; + if (tp_cnt > UBCORE_MAX_TP_CNT_IN_GRP) + tp_cnt = UBCORE_MAX_TP_CNT_IN_GRP; + for (i = 0; i < (int)tp_cnt; i++) { + tp_list[i] = tpg->tp_list[i]; + ubcore_tp_get(tp_list[i]); + } + mutex_unlock(&tpg->mutex); + + ret = dev->ops->modify_multi_tp(tp_cnt, tp_list, attr, mask, failed_tp); + if (ret != (int)tp_cnt) + ubcore_log_err("Failed to modify multi tp %d and tpgn %u ", ret, + tpg->tpgn); + + for (i = 0; i < ret && i < UBCORE_MAX_TP_CNT_IN_GRP; i++) + ubcore_modify_tp_attr(tp_list[i], &attr[i], mask[i]); + + for (i = 0; i < (int)tp_cnt; i++) + ubcore_tp_kref_put(tp_list[i]); + + return (ret > 0 ? (uint32_t)ret : 0); +} + +struct ubcore_tp *ubcore_find_get_tp_in_tpg(struct ubcore_tpg *tpg, + uint32_t tpn) +{ + struct ubcore_tp *tp = NULL; + uint32_t i; + + mutex_lock(&tpg->mutex); + for (i = 0; i < tpg->tpg_cfg.tp_cnt; i++) { + if (tpg->tp_list[i] == NULL || tpg->tp_list[i]->tpn != tpn) + continue; + + tp = tpg->tp_list[i]; + + ubcore_tp_get(tp); + mutex_unlock(&tpg->mutex); + return tp; + } + mutex_unlock(&tpg->mutex); + + return NULL; +} + +int ubcore_find_add_tpg(struct ubcore_device *dev, struct ubcore_tpg *tpg) +{ + struct ubcore_hash_table *ht = &dev->ht[UBCORE_HT_TPG]; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return -EINVAL; + } + + if (ubcore_hash_table_lookup_nolock(ht, tpg->tpgn, &tpg->tpgn) != + NULL) { + spin_unlock(&ht->lock); + return -EEXIST; + } + ubcore_hash_table_add_nolock(ht, &tpg->hnode, tpg->tpgn); + ubcore_tpg_kref_get(tpg); + spin_unlock(&ht->lock); + return 0; +} diff --git a/drivers/ub/urma/ubcore/ubcore_tpg.h b/drivers/ub/urma/ubcore/ubcore_tpg.h new file mode 100644 index 000000000000..4099a6cfbca5 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_tpg.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore tpg header + * Author: Yan Fangfang + * Create: 2023-07-17 + * Note: + * History: 2023-07-17: Create file + */ +#ifndef UBCORE_TPG_H +#define UBCORE_TPG_H + +#include + +struct ubcore_tpg *ubcore_create_tpg(struct ubcore_device *dev, + struct ubcore_tpg_cfg *cfg); +struct ubcore_tpg *ubcore_find_get_tpg(struct ubcore_device *dev, + uint32_t tpgn); +int ubcore_create_multi_tp(struct ubcore_device *dev, struct ubcore_tpg *tpg, + struct ubcore_tp_cfg *cfg); +int ubcore_destroy_multi_tp(struct ubcore_device *dev, struct ubcore_tpg *tpg); +int ubcore_destroy_multi_tp_from_index(struct ubcore_device *dev, + struct ubcore_tpg *tpg, + uint32_t from_index); +uint32_t ubcore_modify_tp_in_tpg(struct ubcore_device *dev, + struct ubcore_tpg *tpg, + struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask *mask, + struct ubcore_tp **failed_tp); +struct ubcore_tp *ubcore_find_get_tp_in_tpg(struct ubcore_tpg *tpg, + uint32_t tpn); +int ubcore_find_remove_tpg(struct ubcore_device *dev, uint32_t tpgn); +void ubcore_tpg_get(void *obj); +void ubcore_tpg_kref_put(struct ubcore_tpg *tpg); +int ubcore_add_tp(struct ubcore_device *dev, struct ubcore_tp *tp); +struct ubcore_tp *ubcore_find_get_tp(struct ubcore_device *dev, uint32_t tpn); +struct ubcore_tp *ubcore_find_remove_tp_node(struct ubcore_device *dev, + uint32_t tpn); +int ubcore_find_add_tpg(struct ubcore_device *dev, struct ubcore_tpg *tpg); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_umem.c b/drivers/ub/urma/ubcore/ubcore_umem.c new file mode 100644 index 000000000000..76686dd067ee --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_umem.c @@ -0,0 +1,343 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore device add and remove ops file + * Author: Fan Yizhen + * Create: 2022-07-28 + * Note: + * History: 2022-07-28: create file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ubcore_log.h" +#include + +static void umem_unpin_pages(struct ubcore_umem *umem, uint64_t nents) +{ + struct scatterlist *sg; + uint32_t i; + + for_each_sg(umem->sg_head.sgl, sg, nents, i) { + struct page *page = NULL; + + if (sg == NULL) { + ubcore_log_err("Invalid sg pointer.\n"); + continue; + } + + page = sg_page(sg); + if (page == NULL) { + ubcore_log_err("Invalid page pointer.\n"); + continue; + } + /* Prevent a large number of concurrent accesses + * from holding spin_lock for too long, causing system reset + */ + cond_resched(); + unpin_user_page(page); + } + sg_free_table(&umem->sg_head); +} + +static void umem_free_sgt(struct ubcore_umem *umem) +{ + umem_unpin_pages(umem, umem->sg_head.nents); +} + +static inline uint64_t umem_cal_npages(uint64_t va, uint64_t len) +{ + return (ALIGN(va + len, PAGE_SIZE) - ALIGN_DOWN(va, PAGE_SIZE)) / + PAGE_SIZE; +} + +static int umem_pin_pages(uint64_t cur_base, uint64_t npages, + uint32_t gup_flags, struct page **page_list) +{ + int pinned; + + pinned = pin_user_pages_fast(cur_base, + min_t(unsigned long, (unsigned long)npages, + PAGE_SIZE / sizeof(struct page *)), + gup_flags | FOLL_LONGTERM, page_list); + + return pinned; +} + +static uint64_t umem_atomic_add(uint64_t npages, struct mm_struct *mm) +{ + uint64_t ret; + + ret = atomic64_add_return(npages, &mm->pinned_vm); + + return ret; +} + +static void umem_atomic_sub(uint64_t npages, struct mm_struct *mm) +{ + atomic64_sub(npages, &mm->pinned_vm); +} + +static struct scatterlist *umem_sg_set_page(struct scatterlist *sg_start, + int pinned, struct page **page_list) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sg_start, sg, pinned, i) { + sg_set_page(sg, page_list[i], PAGE_SIZE, 0); + } + return sg; +} + +static int umem_add_new_pinned(struct ubcore_umem *umem, uint64_t npages) +{ + uint64_t lock_limit; + uint64_t new_pinned; + + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + new_pinned = umem_atomic_add(npages, umem->owning_mm); + if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) { + ubcore_log_err( + "Npages to be pinned is greater than RLIMIT_MEMLOCK[%llu].\n", + lock_limit); + return -ENOMEM; + } + return 0; +} + +static uint64_t umem_pin_all_pages(struct ubcore_umem *umem, uint64_t npages, + uint32_t gup_flags, struct page **page_list) +{ + struct scatterlist *sg_list_start = umem->sg_head.sgl; + uint64_t cur_base = umem->va & PAGE_MASK; + uint64_t page_count = npages; + int pinned; + + while (page_count != 0) { + cond_resched(); + pinned = umem_pin_pages(cur_base, page_count, gup_flags, + page_list); + if (pinned < 0) { + ubcore_log_err( + "Pin pages failed, cur_base: %llx, page_count: %llx, pinned: %d.\n", + cur_base, page_count, pinned); + return npages - page_count; + } + cur_base += (uint64_t)pinned * PAGE_SIZE; + page_count -= (uint64_t)pinned; + sg_list_start = + umem_sg_set_page(sg_list_start, pinned, page_list); + } + return npages; +} + +static int umem_verify_input(struct ubcore_device *ub_dev, uint64_t va, + uint64_t len, union ubcore_umem_flag flag) +{ + if (ub_dev == NULL || ((va + len) < va) || + PAGE_ALIGN(va + len) < (va + len)) { + ubcore_log_err("Invalid parameter, va or len is invalid.\n"); + return -EINVAL; + } + if (flag.bs.non_pin == 1) { + ubcore_log_err("Non-pin mode is not 
supported.\n"); + return -EINVAL; + } + if (can_do_mlock() == 0) + return -EPERM; + return 0; +} + +static int umem_dma_map(struct ubcore_umem *umem, uint64_t npages, + unsigned long dma_attrs) +{ + int ret; + + ret = dma_map_sg_attrs(umem->ub_dev->dma_dev, umem->sg_head.sgl, + (int)npages, DMA_BIDIRECTIONAL, dma_attrs); + if (ret == 0) { + ubcore_log_err("Dma map failed, ret: %d\n", ret); + return -ENOMEM; + } + umem->nmap += (uint32_t)ret; + return 0; +} + +static int ubcore_fill_umem(struct ubcore_umem *umem, struct ubcore_device *dev, + uint64_t va, uint64_t len, + union ubcore_umem_flag flag) +{ + umem->ub_dev = dev; + umem->va = va; + umem->length = len; + umem->flag = flag; + umem->owning_mm = current->mm; + if (!umem->owning_mm) { + ubcore_log_err("mm is null.\n"); + return -EINVAL; + } + mmgrab(umem->owning_mm); + return 0; +} + +static struct ubcore_umem *ubcore_get_target_umem(struct ubcore_device *dev, + uint64_t va, uint64_t len, + union ubcore_umem_flag flag, + struct page **page_list) +{ + /* FOLL_LONGTERM flag added when pin_user_pages_fast called */ + uint32_t gup_flags = (flag.bs.writable == 1) ? FOLL_WRITE : 0; + unsigned long dma_attrs = 0; + struct ubcore_umem *umem; + uint64_t npages; + uint64_t pinned; + int ret = 0; + + umem = kzalloc(sizeof(*umem), GFP_KERNEL); + if (umem == NULL) { + ret = -ENOMEM; + goto out; + } + + ret = ubcore_fill_umem(umem, dev, va, len, flag); + if (ret != 0) { + kfree(umem); + goto out; + } + + npages = umem_cal_npages(umem->va, umem->length); + if (npages == 0 || npages > UINT_MAX) { + ret = -EINVAL; + goto umem_kfree; + } + + ret = umem_add_new_pinned(umem, npages); + if (ret != 0) + goto sub_pinned_vm; + + ret = sg_alloc_table(&umem->sg_head, (unsigned int)npages, GFP_KERNEL); + if (ret != 0) + goto sub_pinned_vm; + + pinned = umem_pin_all_pages(umem, npages, gup_flags, page_list); + if (pinned != npages) { + ret = -ENOMEM; + goto umem_release; + } + + ret = umem_dma_map(umem, npages, dma_attrs); + if (ret != 0) + goto umem_release; + + goto out; + +umem_release: + umem_unpin_pages(umem, pinned); +sub_pinned_vm: + umem_atomic_sub(npages, umem->owning_mm); +umem_kfree: + mmdrop(umem->owning_mm); + kfree(umem); +out: + free_page((unsigned long)page_list); + return ret != 0 ? 
ERR_PTR(ret) : umem; +} + +struct ubcore_umem *ubcore_umem_get(struct ubcore_device *dev, uint64_t va, + uint64_t len, union ubcore_umem_flag flag) +{ + struct page **page_list; + int ret; + + ret = umem_verify_input(dev, va, len, flag); + if (ret < 0) + return ERR_PTR(ret); + + page_list = (struct page **)__get_free_page(GFP_KERNEL); + if (page_list == NULL) + return ERR_PTR(-ENOMEM); + + return ubcore_get_target_umem(dev, va, len, flag, page_list); +} +EXPORT_SYMBOL(ubcore_umem_get); + +void ubcore_umem_release(struct ubcore_umem *umem) +{ + uint64_t npages; + + if (IS_ERR_OR_NULL(umem) || umem->ub_dev == NULL || + umem->owning_mm == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return; + } + + if (((umem->va + umem->length) < umem->va) || + PAGE_ALIGN(umem->va + umem->length) < (umem->va + umem->length)) { + ubcore_log_err("Invalid parameter, va or len is invalid.\n"); + return; + } + + npages = umem_cal_npages(umem->va, umem->length); + dma_unmap_sg(umem->ub_dev->dma_dev, umem->sg_head.sgl, (int)umem->nmap, + DMA_BIDIRECTIONAL); + umem_free_sgt(umem); + umem_atomic_sub(npages, umem->owning_mm); + mmdrop(umem->owning_mm); + kfree(umem); +} +EXPORT_SYMBOL(ubcore_umem_release); + +uint64_t ubcore_umem_find_best_page_size(struct ubcore_umem *umem, + uint64_t page_size_bitmap, uint64_t va) +{ + uint64_t tmp_ps_bitmap; + struct scatterlist *sg; + uint64_t tmp_va, page_off; + dma_addr_t mask; + int i; + + if (IS_ERR_OR_NULL(umem)) { + ubcore_log_err("Invalid parameter.\n"); + return 0; + } + tmp_ps_bitmap = page_size_bitmap & + GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT); + + tmp_va = va; + mask = tmp_ps_bitmap & GENMASK(BITS_PER_LONG - 1, + bits_per((umem->length - 1 + va) ^ va)); + page_off = umem->va & ~PAGE_MASK; + + for_each_sg(umem->sg_head.sgl, sg, umem->sg_head.nents, i) { + mask |= (sg_dma_address(sg) + page_off) ^ tmp_va; + tmp_va += sg_dma_len(sg) - page_off; + if (i != (umem->sg_head.nents - 1)) + mask |= tmp_va; + page_off = 0; + } + + if (mask) + tmp_ps_bitmap &= GENMASK(count_trailing_zeros(mask), 0); + + return tmp_ps_bitmap ? rounddown_pow_of_two(tmp_ps_bitmap) : 0; +} +EXPORT_SYMBOL(ubcore_umem_find_best_page_size); diff --git a/drivers/ub/urma/ubcore/ubcore_utp.c b/drivers/ub/urma/ubcore/ubcore_utp.c new file mode 100644 index 000000000000..18866dfef398 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_utp.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore utp implementation + * Author: Ji Lei + * Create: 2023-08-03 + * Note: + * History: 2023-08-03: Create file + */ + +#include +#include "ubcore_log.h" +#include "ubcore_hash_table.h" +#include "ubcore_device.h" +#include "ubcore_utp.h" + +static void ubcore_destroy_utp(struct ubcore_utp *utp) +{ + struct ubcore_device *dev = utp->ub_dev; + uint32_t utp_idx = utp->utpn; + int ret; + + if (dev->ops == NULL || dev->ops->destroy_utp == NULL) + return; + + ret = dev->ops->destroy_utp(utp); + if (ret != 0) { + ubcore_log_err("Failed to destroy utp:%u", utp_idx); + return; + } +} + +static void ubcore_utp_kref_release(struct kref *ref_cnt) +{ + struct ubcore_utp *utp = + container_of(ref_cnt, struct ubcore_utp, ref_cnt); + + ubcore_destroy_utp(utp); +} + +void ubcore_utp_kref_put(struct ubcore_utp *utp) +{ + if (utp == NULL) + return; + + (void)kref_put(&utp->ref_cnt, ubcore_utp_kref_release); +} + +void ubcore_utp_get(void *obj) +{ + struct ubcore_utp *utp = obj; + + kref_get(&utp->ref_cnt); +} + +static void ubcore_utp_kref_get(struct ubcore_utp *utp) +{ + kref_get(&utp->ref_cnt); +} + +static int ubcore_find_add_utp(struct ubcore_hash_table *ht, + struct ubcore_utp *utp) +{ + struct hlist_node *hnode = &utp->hnode; + uint32_t hash = utp->utpn; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return -EINVAL; + } + if (ubcore_hash_table_lookup_nolock(ht, hash, + ubcore_ht_key(ht, hnode)) != NULL) { + spin_unlock(&ht->lock); + return -EEXIST; + } + ubcore_hash_table_add_nolock(ht, hnode, hash); + ubcore_utp_kref_get(utp); + spin_unlock(&ht->lock); + return 0; +} + +struct ubcore_utp *ubcore_create_utp(struct ubcore_device *dev, + struct ubcore_utp_cfg *cfg) +{ + struct ubcore_utp *utp; + int ret; + + if (dev->ops == NULL || dev->ops->create_utp == NULL) + return NULL; + + ubcore_log_info("Utp mtu config to %u", (uint32_t)cfg->mtu); + + utp = dev->ops->create_utp(dev, cfg, NULL); + if (utp == NULL) { + ubcore_log_err("Failed to create utp"); + return NULL; + } + utp->ub_dev = dev; + utp->utp_cfg = *cfg; + kref_init(&utp->ref_cnt); + + ret = ubcore_find_add_utp(&dev->ht[UBCORE_HT_UTP], utp); + if (ret != 0) { + ubcore_utp_kref_put(utp); + ubcore_log_err("Failed to add utp to the utp table"); + return NULL; + } + return utp; +} + +struct ubcore_utp *ubcore_find_utp(struct ubcore_device *dev, uint32_t idx) +{ + return ubcore_hash_table_lookup(&dev->ht[UBCORE_HT_UTP], idx, &idx); +} + +struct ubcore_utp *ubcore_find_get_utp(struct ubcore_device *dev, uint32_t idx) +{ + return ubcore_hash_table_lookup_get(&dev->ht[UBCORE_HT_UTP], idx, &idx); +} + +void ubcore_find_remove_utp(struct ubcore_device *dev, uint32_t idx) +{ + struct ubcore_utp *utp; + + spin_lock(&dev->ht[UBCORE_HT_UTP].lock); + if (dev->ht[UBCORE_HT_UTP].head == NULL) { + spin_unlock(&dev->ht[UBCORE_HT_UTP].lock); + return; + } + utp = ubcore_hash_table_lookup_nolock(&dev->ht[UBCORE_HT_UTP], idx, + &idx); + if (utp == NULL) { + spin_unlock(&dev->ht[UBCORE_HT_UTP].lock); + return; + } + ubcore_hash_table_remove_nolock(&dev->ht[UBCORE_HT_UTP], &utp->hnode); + ubcore_utp_kref_put(utp); + spin_unlock(&dev->ht[UBCORE_HT_UTP].lock); +} diff --git a/drivers/ub/urma/ubcore/ubcore_utp.h b/drivers/ub/urma/ubcore/ubcore_utp.h new file mode 100644 index 000000000000..e30539b1e798 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_utp.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore utp header + * Author: Ji Lei + * Create: 2023-08-03 + * Note: + * History: 2023-08-03: Create file + */ + +#ifndef UBCORE_UTP_H +#define UBCORE_UTP_H + +#include + +struct ubcore_utp *ubcore_create_utp(struct ubcore_device *dev, + struct ubcore_utp_cfg *cfg); +struct ubcore_utp *ubcore_find_utp(struct ubcore_device *dev, uint32_t idx); +struct ubcore_utp *ubcore_find_get_utp(struct ubcore_device *dev, uint32_t idx); +void ubcore_find_remove_utp(struct ubcore_device *dev, uint32_t idx); +void ubcore_utp_get(void *obj); +void ubcore_utp_kref_put(struct ubcore_utp *utp); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_uvs.c b/drivers/ub/urma/ubcore/ubcore_uvs.c new file mode 100644 index 000000000000..31ba2ca93f4b --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_uvs.c @@ -0,0 +1,735 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: Ubcore uvs management + * Author: Huawei + * Create: 2024-5-21 + */ + +#include "ubcore_log.h" +#include "ubcore_priv.h" +#include "ubcore_uvs.h" + +struct ubcore_uvs_list { + spinlock_t lock; + struct list_head list; /* uvs instance list */ + int count; /* number of uvs instances in list */ + uint32_t next_id; /* next id for uvs */ +}; + +static struct ubcore_uvs_list g_ubcore_uvs_instances = { 0 }; + +/* In the lifecycle of ubcore, ue2uvs_tables does not support deleting elements. + * Elements of g_ubcore_ue_tables are freed when the ubcore module is unloaded.
+ */ +static spinlock_t g_ubcore_ue_tables_lock; +static struct ubcore_ue_table *g_ubcore_ue_tables[UBCORE_MAX_MUE_NUM] = { 0 }; + +static inline struct ubcore_uvs_list *get_uvs_instances(void) +{ + return &g_ubcore_uvs_instances; +} + +int ubcore_uvs_list_get_alive_count(void) +{ + struct ubcore_uvs_list *instances = get_uvs_instances(); + struct ubcore_uvs_instance *ins; + int count = 0; + + spin_lock(&instances->lock); + list_for_each_entry(ins, &instances->list, list_node) { + if (ins->state == UBCORE_UVS_STATE_ALIVE) + count++; + } + spin_unlock(&instances->lock); + + return count; +} + +struct ubcore_uvs_instance **ubcore_uvs_list_get_all_alive(int *count) +{ + struct ubcore_uvs_list *instances = get_uvs_instances(); + struct ubcore_uvs_instance *cur; + struct ubcore_uvs_instance **result; + int i = 0; + *count = 0; + + spin_lock(&instances->lock); + if (instances->count == 0) { + spin_unlock(&instances->lock); + return NULL; + } + + result = kcalloc(instances->count, sizeof(struct ubcore_uvs_instance *), + GFP_ATOMIC); + if (result == NULL) { + spin_unlock(&instances->lock); + return NULL; + } + + list_for_each_entry(cur, &instances->list, list_node) { + if (cur->state != UBCORE_UVS_STATE_ALIVE) + continue; + + result[i] = cur; + ubcore_uvs_kref_get(cur); + i++; + } + spin_unlock(&instances->lock); + + *count = i; + return result; +} + +void ubcore_uvs_list_put(struct ubcore_uvs_instance **uvs_list, int count) +{ + int i; + + if (uvs_list == NULL || count == 0) + return; + + for (i = 0; i < count; i++) + ubcore_uvs_kref_put(uvs_list[i]); + + kfree(uvs_list); +} + +void ubcore_uvs_list_init(void) +{ + struct ubcore_uvs_list *instances = get_uvs_instances(); + + spin_lock_init(&instances->lock); + INIT_LIST_HEAD(&instances->list); + instances->count = 0; + /* 0 for invalid uvs id */ + instances->next_id = 1; +} + +void ubcore_uvs_list_uninit(void) +{ + struct ubcore_uvs_list *instances = get_uvs_instances(); + struct ubcore_uvs_instance *ins, *tmp; + + spin_lock(&instances->lock); + list_for_each_entry_safe(ins, tmp, &instances->list, list_node) { + list_del(&ins->list_node); + ubcore_uvs_kref_put(ins); + } + instances->count = 0; + instances->next_id = 0; + spin_unlock(&instances->lock); +} + +static inline int ubcore_uvs_cmp(const struct ubcore_uvs_instance *a, + const struct ubcore_uvs_instance *b) +{ + return (a->id == b->id && strcmp(a->name, b->name) == 0) ? 
0 : -1; +} + +/* used in single UVS scenario */ +struct ubcore_uvs_instance *ubcore_get_default_uvs(void) +{ + struct ubcore_uvs_list *instances = get_uvs_instances(); + struct ubcore_uvs_instance *uvs = NULL; + + spin_lock(&instances->lock); + if (!list_empty(&instances->list)) { + uvs = list_first_entry(&instances->list, + typeof(struct ubcore_uvs_instance), + list_node); + ubcore_uvs_kref_get(uvs); + } + + spin_unlock(&instances->lock); + + return uvs; +} + +void ubcore_ue2uvs_tables_init(void) +{ + int i; + + for (i = 0; i < UBCORE_MAX_MUE_NUM; i++) + g_ubcore_ue_tables[i] = NULL; + + spin_lock_init(&g_ubcore_ue_tables_lock); +} + +/* this is called when ubcore module is unloading */ +void ubcore_ue2uvs_tables_uninit(void) +{ + struct ubcore_ue_table **tables = g_ubcore_ue_tables; + int i; + + for (i = 0; i < UBCORE_MAX_MUE_NUM; i++) { + if (tables[i] == NULL) + continue; + + ubcore_log_info("delete ue2uvs table %d for mue %s\n", i, + tables[i]->mue_name); + kfree(tables[i]); + tables[i] = NULL; + } +} + +static int ue2uvs_tables_find_nolock(const char *mue_name) +{ + struct ubcore_ue_table **tables = g_ubcore_ue_tables; + int i; + + for (i = 0; i < UBCORE_MAX_MUE_NUM; i++) { + if (tables[i] == NULL) + continue; + + if (strcmp(mue_name, tables[i]->mue_name) == 0) + break; + } + + return i; +} + +static int ue2uvs_tables_find_first_unused_nolock(void) +{ + struct ubcore_ue_table **tables = g_ubcore_ue_tables; + int i; + + for (i = 0; i < UBCORE_MAX_MUE_NUM; i++) { + if (tables[i] == NULL) + break; + } + + return i; +} + +/* Fetch one ue2uvs table for mue, if the table doesn't exist, then create a new one. */ +struct ubcore_ue_table *ubcore_ue2uvs_fetch(const char *mue_name) +{ + struct ubcore_ue_table **tables = g_ubcore_ue_tables; + struct ubcore_ue_table *result = NULL; + int idx; + + spin_lock(&g_ubcore_ue_tables_lock); + idx = ue2uvs_tables_find_nolock(mue_name); + if (idx >= UBCORE_MAX_MUE_NUM) { + /* select one free slot to use when no reusable slot found */ + idx = ue2uvs_tables_find_first_unused_nolock(); + if (idx == UBCORE_MAX_MUE_NUM) { + spin_unlock(&g_ubcore_ue_tables_lock); + ubcore_log_err( + "number of ue2uvs table slot reaches the max %d\n", + UBCORE_MAX_MUE_NUM); + return result; + } + + /* found one free slot */ + ubcore_log_info( + "found a free slot %d to create new ue2uvs table for %s\n", + idx, mue_name); + } + + if (tables[idx] == NULL) { + tables[idx] = + kzalloc(sizeof(struct ubcore_ue_table), GFP_ATOMIC); + if (tables[idx] == NULL) { + spin_unlock(&g_ubcore_ue_tables_lock); + return result; + } + + (void)strscpy(tables[idx]->mue_name, mue_name, UBCORE_MAX_DEV_NAME); + spin_lock_init(&tables[idx]->ue2uvs_lock); + ubcore_log_info( + "create a new ue2uvs table at slot %d for mue %s\n", + idx, mue_name); + } else + ubcore_log_info("reuse ue2uvs table slot %d for mue %s\n", idx, + mue_name); + + result = tables[idx]; + spin_unlock(&g_ubcore_ue_tables_lock); + + return result; +} + +bool ubcore_check_ue2uvs_mapping(struct ubcore_device *dev, uint32_t ue_idx, + uint32_t target_uvs_id) +{ + struct ubcore_uvs_instance *uvs; + bool result = false; + + uvs = ubcore_find_get_uvs_by_ue(dev, ue_idx); + if (uvs == NULL) + return result; + + if (uvs->id == target_uvs_id) + result = true; + + ubcore_uvs_kref_put(uvs); + return result; +} + +struct ubcore_uvs_instance *ubcore_find_get_uvs_by_ue(struct ubcore_device *dev, + uint32_t ue_idx) +{ + struct ubcore_ue_table *ue2uvs_table; + struct ubcore_uvs_instance *uvs; + + if (!dev->attr.tp_maintainer) { + ubcore_log_err( + "try 
to find uvs in non-mue device %s, ue_idx %u\n", + dev->dev_name, ue_idx); + return NULL; + } + + /* use the first uvs when transport type is not UB */ + if (dev->transport_type != UBCORE_TRANSPORT_UB) + return ubcore_get_default_uvs(); + + if (ue_idx >= UBCORE_MAX_UE_CNT) { + ubcore_log_err("invalid ue_idx %u, valid range [0, %u)\n", + ue_idx, UBCORE_MAX_UE_CNT); + return NULL; + } + + ue2uvs_table = ubcore_ue2uvs_fetch(dev->dev_name); + if (ue2uvs_table == NULL) { + ubcore_log_err( + "ue2uvs tables is full, can't create new ue2table\n"); + return NULL; + } + + spin_lock(&ue2uvs_table->ue2uvs_lock); + uvs = ue2uvs_table->ue_entries[ue_idx].uvs_inst; + if (uvs != NULL) + ubcore_uvs_kref_get(uvs); + else + ubcore_log_err("Fail to find uvs, ue_idx is %u.\n", ue_idx); + spin_unlock(&ue2uvs_table->ue2uvs_lock); + + return uvs; +} + +static int ubcore_unset_ue2uvs_mapping(struct ubcore_ue_table *ue_table, + uint32_t ue_idx) +{ + struct ubcore_ue_entry *entries = ue_table->ue_entries; + struct ubcore_uvs_instance *old_uvs; + + spin_lock(&ue_table->ue2uvs_lock); + old_uvs = entries[ue_idx].uvs_inst; + if (old_uvs == NULL) { + spin_unlock(&ue_table->ue2uvs_lock); + return 0; + } + + /* unset mapping */ + atomic_dec(&old_uvs->map2ue); + entries[ue_idx].uvs_inst = NULL; + bitmap_zero(entries[ue_idx].eid_bitmap, UBCORE_MAX_EID_CNT); + spin_unlock(&ue_table->ue2uvs_lock); + + ubcore_log_info( + "remove ue2uvs mapping, ue_idx %u, uvs_name %s, uvs_id %u\n", + ue_idx, old_uvs->name, old_uvs->id); + ubcore_uvs_kref_put(old_uvs); + return 0; +} + +static int +ubcore_set_ue2uvs_mapping_internal(struct ubcore_ue_table *ue2uvs_table, + uint32_t ue_idx, const char *uvs_name) +{ + struct ubcore_ue_entry *entries = ue2uvs_table->ue_entries; + struct ubcore_uvs_instance *uvs, *old_uvs; + + uvs = ubcore_uvs_lookup_get(uvs_name); + if (uvs == NULL) { + ubcore_log_err("uvs %s doesn't exist\n", uvs_name); + return -ENOENT; + } + + spin_lock(&ue2uvs_table->ue2uvs_lock); + old_uvs = entries[ue_idx].uvs_inst; + if (old_uvs == NULL) { + entries[ue_idx].uvs_inst = uvs; + atomic_inc(&uvs->map2ue); + spin_unlock(&ue2uvs_table->ue2uvs_lock); + ubcore_log_info( + "add new ue2uvs mapping, ue_idx %u, uvs_name %s, uvs_id %u\n", + ue_idx, uvs_name, uvs->id); + return 0; + } + + /* old uvs is same with the target uvs, checking if we can reuse it */ + if (ubcore_uvs_cmp(old_uvs, uvs) == 0) { + if (old_uvs->state == UBCORE_UVS_STATE_ALIVE) { + ubcore_log_info( + "reuse ue2uvs mapping ue_idx %u, uvs %s, id %u\n", + ue_idx, uvs_name, old_uvs->id); + spin_unlock(&ue2uvs_table->ue2uvs_lock); + ubcore_uvs_kref_put(uvs); + return 0; + } + + spin_unlock(&ue2uvs_table->ue2uvs_lock); + ubcore_log_err( + "uvs state is not alive when set ue2uvs mapping, ue_idx %u, uvs %s\n", + ue_idx, uvs_name); + ubcore_uvs_kref_put(uvs); + return -EPERM; + } + + /* old uvs and target uvs are different, checking state of old uvs */ + if (old_uvs->state == UBCORE_UVS_STATE_ALIVE) { + ubcore_log_err( + "ue_idx %u is already mapped to uvs %s, uvs_id %u\n", + ue_idx, old_uvs->name, old_uvs->id); + spin_unlock(&ue2uvs_table->ue2uvs_lock); + ubcore_uvs_kref_put(uvs); + return -EEXIST; + } + + /* old uvs is dead, replace it */ + entries[ue_idx].uvs_inst = uvs; + atomic_inc(&uvs->map2ue); + atomic_dec(&old_uvs->map2ue); + spin_unlock(&ue2uvs_table->ue2uvs_lock); + ubcore_log_info( + "replace mapping ue_idx %u, old_uvs %s, old_id %u with new_uvs %s, new_id %u", + ue_idx, old_uvs->name, old_uvs->id, uvs->name, uvs->id); + ubcore_uvs_kref_put(old_uvs); + return 0; +} + 
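/*
 * A minimal sketch of how a management-path caller might drive the
 * ue-to-uvs mapping helpers in this file. It assumes the device is an
 * MUE (dev->attr.tp_maintainer is set); the uvs name "uvs_demo" and the
 * function name below are hypothetical and only APIs declared in
 * ubcore_uvs.h are used.
 */
static int __maybe_unused ubcore_uvs_mapping_sketch(struct ubcore_device *dev,
						    uint32_t ue_idx)
{
	struct ubcore_uvs_instance *uvs;
	int ret;

	/* Bind the ue slot to a registered uvs; an empty name unsets the mapping. */
	ret = ubcore_set_ue2uvs_mapping(dev, ue_idx, "uvs_demo");
	if (ret != 0)
		return ret;

	/* Resolve the uvs serving this ue; the lookup takes a reference. */
	uvs = ubcore_find_get_uvs_by_ue(dev, ue_idx);
	if (uvs == NULL)
		return -ENOENT;

	/* ... forward the request to uvs->genl_port here ... */

	/* Drop the reference taken by the lookup. */
	ubcore_uvs_kref_put(uvs);
	return 0;
}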
+int ubcore_set_ue2uvs_mapping(struct ubcore_device *dev, uint32_t ue_idx, + const char *uvs_name) +{ + struct ubcore_ue_table *ue2uvs_table; + + if (ue_idx >= UBCORE_MAX_UE_CNT) { + ubcore_log_err("invalid ue_idx %u, valid range [0, %u)\n", + ue_idx, UBCORE_MAX_UE_CNT); + return -EINVAL; + } + + ue2uvs_table = ubcore_ue2uvs_fetch(dev->dev_name); + if (ue2uvs_table == NULL) { + ubcore_log_err( + "ue2uvs tables is full, can't create new ue2table.\n"); + return -ENOSPC; + } + + /* remove the mapping when uvs_name is an empty string */ + if (strlen(uvs_name) == 0) + return ubcore_unset_ue2uvs_mapping(ue2uvs_table, ue_idx); + + return ubcore_set_ue2uvs_mapping_internal(ue2uvs_table, ue_idx, + uvs_name); +} + +struct ubcore_uvs_instance *ubcore_uvs_find_get_by_genl_port(uint32_t genl_port) +{ + struct ubcore_uvs_list *instances = get_uvs_instances(); + struct ubcore_uvs_instance *result = NULL; + struct ubcore_uvs_instance *ins; + + spin_lock(&instances->lock); + list_for_each_entry(ins, &instances->list, list_node) { + if (ins->genl_port == genl_port) { + result = ins; + ubcore_uvs_kref_get(result); + break; + } + } + spin_unlock(&instances->lock); + + return result; +} + +struct ubcore_uvs_instance *ubcore_uvs_lookup_get(const char *uvs_name) +{ + struct ubcore_uvs_list *instances = get_uvs_instances(); + struct ubcore_uvs_instance *result = NULL; + struct ubcore_uvs_instance *ins; + + spin_lock(&instances->lock); + list_for_each_entry(ins, &instances->list, list_node) { + if (strcmp(ins->name, uvs_name) == 0) { + result = ins; + ubcore_uvs_kref_get(result); + break; + } + } + spin_unlock(&instances->lock); + + return result; +} + +int ubcore_uvs_set_genl_info(const char *uvs_name, uint32_t genl_port, + struct sock *genl_sock) +{ + struct ubcore_uvs_list *instances = get_uvs_instances(); + struct ubcore_uvs_instance *cur, *tmp; + int ret = -ENODATA; + + spin_lock(&instances->lock); + list_for_each_entry_safe(cur, tmp, &instances->list, list_node) { + if (strcmp(cur->name, uvs_name) == 0) { + cur->genl_port = genl_port; + cur->genl_sock = genl_sock; + atomic_set(&cur->nl_wait_buffer, 0); + ret = 0; + break; + } + } + if (ret == 0) + ubcore_log_info( + "successfully set genl info for uvs %s, uvs_id %u, genl_port %u\n", + cur->name, cur->id, cur->genl_port); + spin_unlock(&instances->lock); + + return ret; +} + +struct ubcore_uvs_instance *ubcore_find_get_uvs_by_pid(uint32_t pid) +{ + struct ubcore_uvs_list *instances = get_uvs_instances(); + struct ubcore_uvs_instance *cur, *tmp; + + spin_lock(&instances->lock); + list_for_each_entry_safe(cur, tmp, &instances->list, list_node) { + if (cur->pid == pid) { + ubcore_uvs_kref_get(cur); + spin_unlock(&instances->lock); + ubcore_log_info("find uvs %s by pid %u\n", cur->name, + pid); + return cur; + } + } + ubcore_log_err("can't find uvs by pid %u\n", pid); + spin_unlock(&instances->lock); + return NULL; +} + +int ubcore_uvs_add(const char *uvs_name, uint32_t policy) +{ + struct ubcore_uvs_list *instances = get_uvs_instances(); + struct ubcore_uvs_instance *new_uvs, *cur, *tmp; + bool reuse_uvs = false; + + uint32_t pid; + + pid = (uint32_t)task_tgid_vnr(current); + + new_uvs = kzalloc(sizeof(struct ubcore_uvs_instance), GFP_ATOMIC); + if (new_uvs == NULL) + return -ENOMEM; + + spin_lock(&instances->lock); + if (instances->count == UBCORE_MAX_UVS_CNT) { + ubcore_log_err("number of uvs reaches the max %d\n", + UBCORE_MAX_UVS_CNT); + spin_unlock(&instances->lock); + kfree(new_uvs); + return -ENOSPC; + } + + list_for_each_entry_safe(cur, tmp, 
&instances->list, list_node) { + if (strcmp(cur->name, uvs_name) != 0) + continue; + + /* we have one old uvs instance which is alive */ + if (cur->state == UBCORE_UVS_STATE_ALIVE) { + spin_unlock(&instances->lock); + ubcore_log_err( + "there is already one running uvs with name %s\n", + uvs_name); + kfree(new_uvs); + return -EEXIST; + } + + /* the old uvs instance is not alive, reuse it and activate it */ + reuse_uvs = true; + cur->state = UBCORE_UVS_STATE_ALIVE; + cur->pid = pid; + ubcore_log_info("add uvs %s, pid %u\n", uvs_name, pid); + goto add_out; + } + + (void)strscpy(new_uvs->name, uvs_name, UBCORE_MAX_DEV_NAME); + kref_init(&new_uvs->ref); + spin_lock_init(&new_uvs->sip_list_lock); + INIT_LIST_HEAD(&new_uvs->sip_list); + new_uvs->pid = pid; + ubcore_log_info("add uvs %s, pid %u\n", uvs_name, pid); + new_uvs->id = instances->next_id; + new_uvs->policy = policy; + new_uvs->state = UBCORE_UVS_STATE_ALIVE; + atomic_set(&new_uvs->map2ue, 0); + + list_add_tail(&new_uvs->list_node, &instances->list); + instances->count++; + instances->next_id++; + +add_out: + if (reuse_uvs) { + kfree(new_uvs); + ubcore_log_info("reuse uvs instance %s with id %u, policy %u\n", + cur->name, cur->id, cur->policy); + } else + ubcore_log_info( + "add uvs instance %s with id %u, policy %u done\n", + new_uvs->name, new_uvs->id, new_uvs->policy); + spin_unlock(&instances->lock); + return 0; +} + +int ubcore_uvs_remove(const char *uvs_name) +{ + struct ubcore_uvs_list *instances = get_uvs_instances(); + struct ubcore_uvs_instance *ins, *tmp; + int ret = -ENODATA; + + spin_lock(&instances->lock); + list_for_each_entry_safe(ins, tmp, &instances->list, list_node) { + if (strcmp(ins->name, uvs_name) != 0) + continue; + + if (ins->state == UBCORE_UVS_STATE_DEAD) { + ubcore_log_warn("uvs %s is already set dead.\n", + uvs_name); + ret = -EPERM; + break; + } + + /* Uvs was referenced by ue, can't remove uvs from list. */ + if (atomic_read(&ins->map2ue) != 0) { + ins->state = UBCORE_UVS_STATE_DEAD; + ubcore_log_info( + "uvs %s was referenced by ue, so set dead and keep it.\n", + uvs_name); + spin_unlock(&instances->lock); + return 0; + } + + /* Uvs is not referenced by any ue, so it can be removed from the list. 
*/ + list_del(&ins->list_node); + ins->state = UBCORE_UVS_STATE_DEAD; + instances->count--; + ubcore_uvs_kref_put(ins); + ret = 0; + break; + } + spin_unlock(&instances->lock); + + if (ret == 0) + ubcore_log_info("uvs instance %s was successfully removed\n", + uvs_name); + + return ret; +} + +static int ubcore_update_set_eid_idx(struct ubcore_device *dev, uint32_t ue_idx, + uint32_t eid_idx, bool is_set) +{ + struct ubcore_ue_table *ue_table; + struct ubcore_ue_entry *ue; + + if (ue_idx >= UBCORE_MAX_UE_CNT) { + ubcore_log_err("Invalid ue_idx:%u\n", ue_idx); + return -EINVAL; + } + + ue_table = ubcore_ue2uvs_fetch(dev->dev_name); + if (ue_table == NULL) { + ubcore_log_err("Failed to get ue_table mue_name%s.\n", + dev->dev_name); + return -ENOSPC; + } + + spin_lock(&ue_table->ue2uvs_lock); + ue = &ue_table->ue_entries[ue_idx]; + + if (is_set) + set_bit(eid_idx, ue->eid_bitmap); + else + clear_bit(eid_idx, ue->eid_bitmap); + spin_unlock(&ue_table->ue2uvs_lock); + return 0; +} + +int ubcore_ue_set_eid_idx(struct ubcore_device *dev, uint32_t ue_idx, + uint32_t eid_idx) +{ + return ubcore_update_set_eid_idx(dev, ue_idx, eid_idx, true); +} + +int ubcore_ue_clear_eid_idx(struct ubcore_device *dev, uint32_t ue_idx, + uint32_t eid_idx) +{ + return ubcore_update_set_eid_idx(dev, ue_idx, eid_idx, false); +} + +int ubcore_get_eid_use_cnt(struct ubcore_device *dev, uint32_t ue_idx, + uint32_t *eid_use_cnt) +{ + struct ubcore_ue_table *ue_table; + struct ubcore_ue_entry *ue; + + if (ue_idx >= UBCORE_MAX_UE_CNT) { + ubcore_log_err("Invalid ue_idx:%u\n", ue_idx); + return -EINVAL; + } + + ue_table = ubcore_ue2uvs_fetch(dev->dev_name); + if (ue_table == NULL) { + ubcore_log_err("Failed to get ue_table mue_name%s.\n", + dev->dev_name); + return -ENOSPC; + } + + spin_lock(&ue_table->ue2uvs_lock); + ue = &ue_table->ue_entries[ue_idx]; + *eid_use_cnt = bitmap_weight(ue->eid_bitmap, UBCORE_MAX_EID_CNT); + spin_unlock(&ue_table->ue2uvs_lock); + return 0; +} + +static void ubcore_uvs_kref_release(struct kref *ref) +{ + struct ubcore_uvs_instance *uvs = + container_of(ref, struct ubcore_uvs_instance, ref); + + ubcore_log_info("release uvs %s, uvs_id %u\n", uvs->name, uvs->id); + kfree(uvs); +} + +void ubcore_uvs_kref_put(struct ubcore_uvs_instance *uvs) +{ + uint32_t refcnt; + + if (uvs == NULL) + return; + refcnt = kref_read(&uvs->ref); + ubcore_log_debug( + "kref_put: uvs %s, id %u, old refcnt %u, new refcnt %u\n", + uvs->name, uvs->id, refcnt, refcnt > 0 ? refcnt - 1 : 0); + + (void)kref_put(&uvs->ref, ubcore_uvs_kref_release); +} + +void ubcore_uvs_kref_get(struct ubcore_uvs_instance *uvs) +{ + kref_get(&uvs->ref); + ubcore_log_debug("kref_get: uvs %s, id %u, refcnt %u\n", uvs->name, + uvs->id, kref_read(&uvs->ref)); +} diff --git a/drivers/ub/urma/ubcore/ubcore_uvs.h b/drivers/ub/urma/ubcore/ubcore_uvs.h new file mode 100644 index 000000000000..3810e4147328 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_uvs.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: Ubcore uvs management + * Author: Huawei + * Create: 2024-5-21 + */ + +#ifndef UBCORE_UVS_H +#define UBCORE_UVS_H + +#include + +#include +#include "ubcore_priv.h" + +int ubcore_uvs_list_get_alive_count(void); +/* call ubcore_uvs_list_put to free list memory */ +struct ubcore_uvs_instance **ubcore_uvs_list_get_all_alive(int *count); +void ubcore_uvs_list_put(struct ubcore_uvs_instance **uvs_list, int count); +void ubcore_uvs_list_init(void); +void ubcore_uvs_list_uninit(void); + +/* whether the ue is mapped to the target uvs */ +bool ubcore_check_ue2uvs_mapping(struct ubcore_device *dev, uint32_t ue_idx, + uint32_t target_uvs_id); +struct ubcore_uvs_instance *ubcore_find_get_uvs_by_ue(struct ubcore_device *dev, + uint32_t ue_idx); +struct ubcore_uvs_instance *ubcore_find_get_uvs_by_pid(uint32_t pid); +int ubcore_set_ue2uvs_mapping(struct ubcore_device *dev, uint32_t ue_idx, + const char *uvs_name); + +void ubcore_ue2uvs_tables_init(void); +void ubcore_ue2uvs_tables_uninit(void); +struct ubcore_ue_table *ubcore_ue2uvs_fetch(const char *mue_name); + +int ubcore_uvs_add(const char *uvs_name, uint32_t policy); +int ubcore_uvs_remove(const char *uvs_name); + +int ubcore_ue_set_eid_idx(struct ubcore_device *dev, uint32_t ue_idx, + uint32_t eid_idx); +int ubcore_ue_clear_eid_idx(struct ubcore_device *dev, uint32_t ue_idx, + uint32_t eid_idx); +int ubcore_get_eid_use_cnt(struct ubcore_device *dev, uint32_t ue_idx, + uint32_t *eid_use_cnt); + +int ubcore_uvs_set_genl_info(const char *uvs_name, uint32_t genl_port, + struct sock *genl_sock); +struct ubcore_uvs_instance * +ubcore_uvs_find_get_by_genl_port(uint32_t genl_port); +struct ubcore_uvs_instance *ubcore_uvs_lookup_get(const char *uvs_name); +struct ubcore_uvs_instance *ubcore_get_default_uvs(void); + +void ubcore_uvs_kref_put(struct ubcore_uvs_instance *uvs); +void ubcore_uvs_kref_get(struct ubcore_uvs_instance *uvs); + +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_uvs_cmd.c b/drivers/ub/urma/ubcore/ubcore_uvs_cmd.c new file mode 100644 index 000000000000..13a7b379da25 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_uvs_cmd.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore uvs cmd implement + * Author: Ji Lei + * Create: 2023-07-03 + * Note: + * History: 2023-07-03: create file + */ + +#include +#include +#include + +#include +#include "ubcore_device.h" +#include "ubcore_priv.h" +#include "ubcore_cmd_tlv.h" +#include "ubcore_topo_info.h" +#include +#include "net/ubcore_cm.h" +#include "ubcore_uvs_cmd.h" + +static int ubcore_eidtbl_add_entry(struct ubcore_device *dev, + union ubcore_eid *eid, uint32_t *eid_idx, + struct net *net) +{ + uint32_t i; + + if (dev->eid_table.eid_entries == NULL) + return -EINVAL; + + for (i = 0; i < dev->attr.dev_cap.max_eid_cnt; i++) { + if (memcmp(dev->eid_table.eid_entries[i].eid.raw, eid->raw, + UBCORE_EID_SIZE) == 0) { + ubcore_log_warn("eid already exists\n"); + return 0; + } + } + for (i = 0; i < dev->attr.dev_cap.max_eid_cnt; i++) { + if (dev->eid_table.eid_entries[i].valid == false) { + dev->eid_table.eid_entries[i].eid = *eid; + dev->eid_table.eid_entries[i].valid = true; + dev->eid_table.eid_entries[i].eid_index = i; + dev->eid_table.eid_entries[i].net = + (net == NULL) ? &init_net : net; + *eid_idx = i; + ubcore_log_info( + "dev:%s, add eid: %pI6c, idx: %u, net:0x%p\n", + dev->dev_name, eid, i, net); + break; + } + } + if (i == dev->attr.dev_cap.max_eid_cnt) { + ubcore_log_err("eid table is full\n"); + return -1; + } + return 0; +} + +static int ubcore_eidtbl_del_entry(struct ubcore_device *dev, + union ubcore_eid *eid, uint32_t *eid_idx) +{ + uint32_t i; + + if (dev->eid_table.eid_entries == NULL) + return -EINVAL; + + for (i = 0; i < dev->attr.dev_cap.max_eid_cnt; i++) { + if (memcmp(dev->eid_table.eid_entries[i].eid.raw, eid->raw, + UBCORE_EID_SIZE) == 0) { + (void)memset(&dev->eid_table.eid_entries[i], 0, + sizeof(struct ubcore_eid_entry)); + *eid_idx = i; + ubcore_log_info("dev:%s, del eid: %pI6c, idx: %u\n", + dev->dev_name, eid, i); + break; + } + } + if (i == dev->attr.dev_cap.max_eid_cnt) { + ubcore_log_err("eid table is empty"); + return -1; + } + return 0; +} + +static inline void ubcore_dispatch_eid_change(struct ubcore_device *dev, + uint32_t eid_idx) +{ + struct ubcore_event event; + + event.ub_dev = dev; + event.event_type = UBCORE_EVENT_EID_CHANGE; + event.element.eid_idx = eid_idx; + + ubcore_dispatch_async_event(&event); +} + +static int ubcore_eidtbl_update_entry(struct ubcore_device *dev, + union ubcore_eid *eid, uint32_t eid_idx, + bool is_add, struct net *net) +{ + if (dev->eid_table.eid_entries == NULL) + return -EINVAL; + + if (eid_idx >= dev->attr.dev_cap.max_eid_cnt) { + ubcore_log_err("eid table is full\n"); + return -1; + } + if (is_add) + dev->eid_table.eid_entries[eid_idx].eid = *eid; + else + (void)memset(&dev->eid_table.eid_entries[eid_idx].eid, 0, + sizeof(union ubcore_eid)); + /* dispatch eid change for both eid add and remove */ + ubcore_dispatch_eid_change(dev, eid_idx); + + dev->eid_table.eid_entries[eid_idx].valid = is_add; + dev->eid_table.eid_entries[eid_idx].eid_index = eid_idx; + dev->eid_table.eid_entries[eid_idx].net = net; + ubcore_log_info("%s eid: %pI6c, idx: %u\n", + is_add == true ? 
"add" : "del", eid, eid_idx); + return 0; +} + +int ubcore_update_eidtbl_by_eid(struct ubcore_device *dev, + union ubcore_eid *eid, uint32_t *eid_idx, + bool is_alloc_eid, struct net *net) +{ + int ret; + + spin_lock(&dev->eid_table.lock); + if (is_alloc_eid) + ret = ubcore_eidtbl_add_entry(dev, eid, eid_idx, net); + else + ret = ubcore_eidtbl_del_entry(dev, eid, eid_idx); + + spin_unlock(&dev->eid_table.lock); + return ret; +} + +int ubcore_update_eidtbl_by_idx(struct ubcore_device *dev, + union ubcore_eid *eid, uint32_t eid_idx, + bool is_alloc_eid, struct net *net) +{ + int ret; + + spin_lock(&dev->eid_table.lock); + ret = ubcore_eidtbl_update_entry(dev, eid, eid_idx, is_alloc_eid, net); + spin_unlock(&dev->eid_table.lock); + return ret; +} + +static int ubcore_get_eid_index(struct ubcore_device *dev, + union ubcore_eid *eid, uint32_t *eid_index) +{ + uint32_t idx, ret = 0; + + spin_lock(&dev->eid_table.lock); + for (idx = 0; idx < dev->eid_table.eid_cnt; idx++) { + if (memcmp(&dev->eid_table.eid_entries[idx].eid, eid, + sizeof(union ubcore_eid)) == 0) { + *eid_index = idx; + break; + } + } + if (idx == dev->eid_table.eid_cnt) + ret = -1; + spin_unlock(&dev->eid_table.lock); + return ret; +} + +static int ubcore_create_jetty_rsrc(struct ubcore_topo_map *topo_map) +{ + struct ubcore_device *dev; + struct ubcore_eid_info eid_info = { 0 }; + struct ubcore_topo_info *cur_node_info; + int i, ret; + bool has_any_primary_eid = false; + + cur_node_info = ubcore_get_cur_topo_info(topo_map); + if (cur_node_info == NULL) { + ubcore_log_err("Failed to get cur node info\n"); + return -EINVAL; + } + + for (i = 0; i < IODIE_NUM; i++) { + if (!is_eid_valid(cur_node_info->io_die_info[i].primary_eid)) + continue; + has_any_primary_eid = true; + (void)memcpy(&eid_info.eid, + cur_node_info->io_die_info[i].primary_eid, + sizeof(union ubcore_eid)); + dev = ubcore_get_device_by_eid(&eid_info.eid, + UBCORE_TRANSPORT_UB); + if (dev == NULL) { + ubcore_log_err( + "primary %d dev not exist, eid: " EID_FMT "\n", + i, + EID_RAW_ARGS(cur_node_info->io_die_info[i] + .primary_eid)); + return -1; + } + + ret = ubcore_get_eid_index(dev, &eid_info.eid, + &eid_info.eid_index); + if (ret != 0) { + ubcore_log_err("Failed to get eid index\n"); + return ret; + } + + ret = ubcore_call_cm_eid_ops(dev, &eid_info, + UBCORE_MGMT_EVENT_EID_ADD); + if (ret != 0) { + ubcore_log_err("Failed to call cm eid ops\n"); + return ret; + } + ubcore_log_info( + "Success to create jetty rsrc: primary %d dev %s, eid: " EID_FMT + ", idx: %d\n", + i, dev->dev_name, + EID_RAW_ARGS(cur_node_info->io_die_info[i].primary_eid), + eid_info.eid_index); + } + return has_any_primary_eid ? 
0 : -1; +} + +static int ubcore_cmd_set_topo(struct ubcore_global_file *file, + struct ubcore_cmd_hdr *hdr) +{ + struct ubcore_cmd_set_topo arg; + struct ubcore_topo_map *new_topo_map; + struct ubcore_topo_map *topo_map; + int ret = 0; + + ret = ubcore_global_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + if (arg.in.topo_info == NULL || arg.in.topo_num == 0 || + arg.in.topo_num > MAX_NODE_NUM) { + ubcore_log_err("Invalid set_topo_info param\n"); + return -EINVAL; + } + topo_map = ubcore_get_global_topo_map(); + if (topo_map == NULL) { + topo_map = ubcore_create_global_topo_map(arg.in.topo_info, + arg.in.topo_num); + if (topo_map == NULL) { + ubcore_log_err("Failed to create topo map\n"); + return -ENOMEM; + } + if (!is_bonding_and_primary_eid_valid(topo_map)) { + ubcore_delete_global_topo_map(); + ubcore_log_err("Invalid bonding or primary eid\n"); + return -EINVAL; + } + } else { + new_topo_map = ubcore_create_topo_map_from_user( + arg.in.topo_info, arg.in.topo_num); + if (ubcore_update_topo_map(new_topo_map, topo_map) != 0) { + ubcore_delete_topo_map(new_topo_map); + ubcore_log_err("Failed to update topo info\n"); + return -1; + } + ubcore_delete_topo_map(new_topo_map); + } + ubcore_show_topo_map(topo_map); + + ret = ubcore_create_jetty_rsrc(topo_map); + if (ret != 0) { + ubcore_log_err("Failed to create jetty rsrc\n"); + ubcore_delete_global_topo_map(); + return ret; + } + return 0; +} + +typedef int (*ubcore_uvs_global_cmd_handler)(struct ubcore_global_file *file, + struct ubcore_cmd_hdr *hdr); +struct ubcore_uvs_global_cmd_func { + ubcore_uvs_global_cmd_handler func; + bool need_cap_verify; +}; + +static struct ubcore_uvs_global_cmd_func g_ubcore_uvs_global_cmd_funcs[] = { + [0] = { NULL, false }, + [UBCORE_CMD_SET_TOPO] = { ubcore_cmd_set_topo, true }, +}; + +int ubcore_uvs_global_cmd_parse(struct ubcore_global_file *file, + struct ubcore_cmd_hdr *hdr) +{ + if (hdr->command < UBCORE_CMD_SET_TOPO || + hdr->command >= UBCORE_CMD_GLOBAL_LAST || + g_ubcore_uvs_global_cmd_funcs[hdr->command].func == NULL) { + ubcore_log_err("bad ubcore global command: %d.\n", + (int)hdr->command); + return -EINVAL; + } + + if (g_ubcore_uvs_global_cmd_funcs[hdr->command].need_cap_verify && + !capable(CAP_NET_ADMIN)) { + ubcore_log_err( + "failed cap verify, ubcore global command: %d.\n", + (int)hdr->command); + return -EPERM; + } + return g_ubcore_uvs_global_cmd_funcs[hdr->command].func(file, hdr); +} diff --git a/drivers/ub/urma/ubcore/ubcore_uvs_cmd.h b/drivers/ub/urma/ubcore/ubcore_uvs_cmd.h new file mode 100644 index 000000000000..16b458e6635e --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_uvs_cmd.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore uvs cmd header file + * Author: Ji Lei + * Create: 2023-07-03 + * Note: + * History: 2023-07-03: Create file + */ + +#ifndef UBCORE_UVS_CMD_H +#define UBCORE_UVS_CMD_H + +#include +#include + +#include "ubcore_cmd.h" +#include "ubcore_log.h" +#include +#include "ubcore_priv.h" + +#define UBCORE_UVS_CMD_MAGIC 'V' +#define UBCORE_UVS_CMD _IOWR(UBCORE_UVS_CMD_MAGIC, 1, struct ubcore_cmd_hdr) +#define UBCORE_CMD_CHANNEL_INIT_SIZE 32 +#define UBCORE_MAX_VTP_CFG_CNT 32 +#define UBCORE_MAX_EID_CONFIG_CNT 32 +#define UBCORE_MAX_DSCP_VL_NUM 64 +#define UBCORE_CMD_MAX_MUE_NUM 128 + +enum ubcore_uvs_global_cmd { UBCORE_CMD_SET_TOPO = 1, UBCORE_CMD_GLOBAL_LAST }; + +struct ubcore_cmd_set_topo { + struct { + void *topo_info; + uint32_t topo_num; + } in; +}; + +int ubcore_uvs_mue_cmd_parse(struct ubcore_mue_file *file, + struct ubcore_cmd_hdr *hdr); + +int ubcore_uvs_global_cmd_parse(struct ubcore_global_file *file, + struct ubcore_cmd_hdr *hdr); + +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_vtp.c b/drivers/ub/urma/ubcore/ubcore_vtp.c new file mode 100644 index 000000000000..c4035c0f0d7f --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_vtp.c @@ -0,0 +1,2276 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore vtp implementation + * Author: Yan Fangfang + * Create: 2023-07-14 + * Note: + * History: 2023-07-14: Create file + */ + +#include +#include "ubcore_connect_adapter.h" +#include "ubcore_msg.h" +#include "ubcore_log.h" +#include "ubcore_hash_table.h" +#include "ubcore_priv.h" +#include +#include "ubcore_netdev.h" +#include "ubcore_uvs.h" +#include "ubcore_tpg.h" +#include "ubcore_utp.h" +#include "ubcore_workqueue.h" +#include "ubcore_msg.h" +#include "ubcore_vtp.h" + +#define UBCORE_SYNC_VTPN_MAX_WAIT_TIMES (30 * 1000 / 20) +#define UBCORE_SYNC_REUSE_VTPN_ONE_TIME 20 +#define UBCORE_VTPS_ERROR 3 + +enum ubcore_vtp_task_type { UBCORE_TASK_DESTROY_VTP }; + +static LIST_HEAD(g_vtp_work_list); +static DEFINE_SPINLOCK(g_vtp_work_lock); + +static LIST_HEAD(g_async_disconnect_vtp_work_list); +static DEFINE_SPINLOCK(g_async_disconnect_vtp_work_lock); + +struct ubcore_vtp_work { + struct list_head node; /* add to g_vtp_work_list */ + struct delayed_work delay_work; + struct ubcore_device *dev; + struct ubcore_vtpn *vtpn; + enum ubcore_vtp_task_type type; + uint32_t retry_times; +}; + +struct ubcore_vtpn_wait_cb_node_imp { + struct list_head node; + struct list_head node_global; + struct ubcore_vtpn_cb_para para; + struct ubcore_wait_vtpn_resp_work *wait_work; +}; + +#define DESTROY_VTP_INI_INTERVAL 3000 /* 3s */ +#define DESTROY_VTP_MAX_RETRY_TIMES 9 + +static struct ubcore_vtpn *ubcore_find_get_vtpn(struct ubcore_device *dev, + struct ubcore_vtp_param *param); +static int ubcore_free_vtpn(struct ubcore_vtpn *vtpn); +static void ubcore_hash_table_rmv_vtpn(struct ubcore_device *dev, + struct ubcore_vtpn *vtpn); + +static void ubcore_add_async_disconnect_vtp_work_list( + struct ubcore_vtpn_wait_cb_node_imp *vtp_work); +static void ubcore_del_async_disconnect_vtp_work_list( + struct ubcore_vtpn_wait_cb_node_imp *vtp_work); + +void ubcore_add_async_wait_list(struct ubcore_vtpn *vtpn, + struct ubcore_vtpn_cb_para *para, + struct ubcore_wait_vtpn_resp_work *wait_work) +{ + struct ubcore_vtpn_wait_cb_node_imp *new_node; + + if (vtpn == NULL || para == NULL) { + ubcore_log_err("invalid parameter"); + return; + } + + new_node = kzalloc(sizeof(struct ubcore_vtpn_wait_cb_node_imp), + GFP_ATOMIC); + if (new_node == NULL) + return; + + new_node->para.type = para->type; + new_node->para.tjetty = para->tjetty; + new_node->para.jetty = para->jetty; + new_node->para.import_cb = para->import_cb; + new_node->para.bind_cb = para->bind_cb; + new_node->para.unimport_cb = para->unimport_cb; + new_node->para.unbind_cb = para->unbind_cb; + new_node->wait_work = wait_work; + if (para->type == UBCORE_IMPORT_JETTY_VTPN || + para->type == UBCORE_BIND_JETTY_VTPN) { + list_add_tail(&new_node->node, &vtpn->list); + INIT_LIST_HEAD(&new_node->node_global); + } else { + list_add_tail(&new_node->node, &vtpn->disconnect_list); + ubcore_add_async_disconnect_vtp_work_list(new_node); + } +} + +void ubcore_del_async_wait_list(struct ubcore_vtpn *vtpn) +{ + struct ubcore_vtpn_wait_cb_node_imp *wait_node, *next; + struct ubcore_unimport_cb *unimport_cb; + struct ubcore_unbind_cb *unbind_cb; + + if (vtpn == NULL) { + ubcore_log_err("invalid parameter"); + return; + } + + list_for_each_entry_safe(wait_node, next, &vtpn->list, node) { + if (wait_node->para.type == UBCORE_UNIMPORT_JETTY_VTPN) { + if (wait_node->para.unimport_cb != NULL) { + unimport_cb = wait_node->para.unimport_cb; + unimport_cb->callback(ECONNREFUSED, + unimport_cb->user_arg); + kfree(wait_node->para.unimport_cb); + wait_node->para.unimport_cb = 
NULL; + } + } + if (wait_node->para.type == UBCORE_UNBIND_JETTY_VTPN) { + if (wait_node->para.unbind_cb != NULL) { + unbind_cb = wait_node->para.unbind_cb; + unbind_cb->callback(ECONNREFUSED, + unbind_cb->user_arg); + kfree(wait_node->para.unbind_cb); + wait_node->para.unbind_cb = NULL; + } + } + ubcore_del_async_disconnect_vtp_work_list(wait_node); + list_del(&wait_node->node); + kfree(wait_node); + } +} + +static void ubcore_add_async_disconnect_vtp_work_list( + struct ubcore_vtpn_wait_cb_node_imp *vtp_work) +{ + spin_lock(&g_async_disconnect_vtp_work_lock); + list_add_tail(&vtp_work->node_global, + &g_async_disconnect_vtp_work_list); + spin_unlock(&g_async_disconnect_vtp_work_lock); +} + +static void ubcore_del_async_disconnect_vtp_work_list( + struct ubcore_vtpn_wait_cb_node_imp *vtp_work) +{ + spin_lock(&g_async_disconnect_vtp_work_lock); + list_del_init(&vtp_work->node_global); + spin_unlock(&g_async_disconnect_vtp_work_lock); +} + +static void +ubcore_flush_dev_async_disconnect_vtp_work(struct ubcore_device *dev) +{ + struct ubcore_vtpn_wait_cb_node_imp *entry = NULL; + struct ubcore_vtpn_wait_cb_node_imp *next = NULL; + + spin_lock(&g_async_disconnect_vtp_work_lock); + list_for_each_entry_safe(entry, next, &g_async_disconnect_vtp_work_list, + node_global) { + if (entry != NULL && entry->wait_work != NULL && + entry->wait_work->dev == dev) + ubcore_modify_delay_work(UBCORE_DISCONNECT_VTP_ASYNC_WQ, + &entry->wait_work->delay_work, + 0); + } + spin_unlock(&g_async_disconnect_vtp_work_lock); +} + +void ubcore_add_vtp_work_list(struct ubcore_vtp_work *vtp_work) +{ + spin_lock(&g_vtp_work_lock); + list_add_tail(&vtp_work->node, &g_vtp_work_list); + spin_unlock(&g_vtp_work_lock); +} + +void ubcore_del_vtp_work_list(struct ubcore_vtp_work *vtp_work) +{ + spin_lock(&g_vtp_work_lock); + list_del(&vtp_work->node); + spin_unlock(&g_vtp_work_lock); +} + +void ubcore_flush_dev_vtp_work(struct ubcore_device *dev) +{ + struct ubcore_vtp_work *entry = NULL; + struct ubcore_vtp_work *next = NULL; + + spin_lock(&g_vtp_work_lock); + list_for_each_entry_safe(entry, next, &g_vtp_work_list, node) { + if (entry != NULL && entry->dev == dev) + ubcore_modify_delay_work(UBCORE_VTP_TASK_WQ, + &entry->delay_work, 0); + } + spin_unlock(&g_vtp_work_lock); + + ubcore_flush_dev_async_disconnect_vtp_work(dev); +} + +static int ubcore_handle_create_vtp_resp(struct ubcore_device *dev, + struct ubcore_resp *resp, + void *user_arg) +{ + struct ubcore_create_vtp_resp *vtp_resp = NULL; + struct ubcore_vtpn *vtpn = (struct ubcore_vtpn *)user_arg; + + if (resp == NULL || resp->len < sizeof(struct ubcore_create_vtp_resp)) { + ubcore_log_err("invalid ubcore_create_vtp_resp len"); + return (int)UBCORE_MSG_RESP_FAIL; + } + + if (vtpn == NULL) { + ubcore_log_err("invalid user_arg, vtpn == NULL"); + return (int)UBCORE_MSG_RESP_FAIL; + } + + vtp_resp = (struct ubcore_create_vtp_resp *)resp->data; + + switch (vtp_resp->ret) { + case UBCORE_MSG_RESP_FAIL: + ubcore_log_err("failed to create vtp: response error.\n"); + break; + case UBCORE_MSG_RESP_IN_PROGRESS: + ubcore_log_err( + "failed: try to del vtp which is being created. 
Try again later.\n"); + break; + case UBCORE_MSG_RESP_RC_JETTY_ALREADY_BIND: + ubcore_log_err( + "failed: rc jetty already bind by other jetty.\n"); + break; + case UBCORE_MSG_RESP_LIMIT_RATE: + ubcore_log_err( + "failed: the current link setup speed has reached the maximum value.\n"); + break; + case UBCORE_MSG_RESP_SUCCESS: + /* mue may return a new vtpn */ + vtpn->vtpn = vtp_resp->vtpn; + break; + default: + ubcore_log_err( + "unknown the state of vtp reply to create. state: %d\n", + vtp_resp->ret); + break; + } + + return vtp_resp->ret; +} + +static int ubcore_send_create_vtp_req(struct ubcore_device *dev, + struct ubcore_vtp_param *p, + struct ubcore_vtpn *vtpn) +{ + uint32_t data_len = (uint32_t)sizeof(struct ubcore_create_vtp_req); + struct ubcore_create_vtp_req *create; + struct ubcore_req *req; + + req = kzalloc(sizeof(struct ubcore_req) + data_len, GFP_KERNEL); + if (req == NULL) + return -ENOMEM; + req->opcode = UBCORE_MSG_CREATE_VTP; + req->len = data_len; + + create = (struct ubcore_create_vtp_req *)req->data; + create->vtpn = vtpn->vtpn; + create->trans_mode = p->trans_mode; + create->local_eid = p->local_eid; + create->peer_eid = p->peer_eid; + create->eid_index = p->eid_index; + create->local_jetty = p->local_jetty; + create->peer_jetty = p->peer_jetty; + (void)strscpy(create->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME); + create->virtualization = dev->attr.virtualization; + + kfree(req); + return 0; +} + +static void ubcore_handle_vtpn_wait_list(struct ubcore_vtpn *vtpn, + struct ubcore_device *dev, + int vtp_state, int status) +{ + struct ubcore_vtpn_wait_cb_node_imp *wait_node, *next; + struct ubcore_vtpn_cb_para *para; + struct ubcore_wait_vtpn_resp_work *wait_work; + struct ubcore_import_cb *import_cb; + struct ubcore_bind_cb *bind_cb; + + vtpn->state = vtp_state; + + list_for_each_entry_safe(wait_node, next, &vtpn->list, node) { + ubcore_del_async_disconnect_vtp_work_list(wait_node); + + para = &wait_node->para; + wait_work = wait_node->wait_work; + + if (wait_work != NULL && status != ETIMEDOUT) { + cancel_delayed_work_sync(&wait_work->delay_work); + kfree(wait_work); + } + + if (para->type == UBCORE_IMPORT_JETTY_VTPN) { + import_cb = para->import_cb; + import_cb->callback(para->tjetty, status, + import_cb->user_arg); + kfree(para->import_cb); + para->import_cb = NULL; + } else { + bind_cb = para->bind_cb; + bind_cb->callback(para->jetty, para->tjetty, status, + bind_cb->user_arg); + kfree(para->bind_cb); + para->bind_cb = NULL; + } + + if (status != 0) { + if (para->type == UBCORE_IMPORT_JETTY_VTPN) { + mutex_destroy(¶->tjetty->lock); + (void)dev->ops->unimport_jetty(para->tjetty); + } else { + (void)dev->ops->unbind_jetty(para->jetty); + atomic_dec(¶->jetty->use_cnt); + atomic_dec(¶->tjetty->use_cnt); + } + atomic_dec(&vtpn->use_cnt); + } + + list_del(&wait_node->node); + kfree(wait_node); + } +} + +void ubcore_wait_connect_vtp_resp_intime(struct ubcore_msg_session *s, + struct ubcore_device *dev, + struct ubcore_resp *resp) +{ + struct ubcore_vtp_param param = { 0 }; + struct ubcore_create_vtp_req *create; + struct ubcore_vtpn *vtpn; + struct ubcore_req *req; + int ret; + + ubcore_log_info("Success to wait connect vtp resp intime.\n"); + if (s->req == NULL) + return; + req = s->req; + create = (struct ubcore_create_vtp_req *)req->data; + param.trans_mode = create->trans_mode; + param.local_eid = create->local_eid; + param.peer_eid = create->peer_eid; + param.local_jetty = create->local_jetty; + param.peer_jetty = create->peer_jetty; + + vtpn = 
ubcore_find_get_vtpn(dev, ¶m); + if (vtpn == NULL) { + ubcore_log_err("Failed to find vtpn, seid : " EID_FMT + ", deid : " EID_FMT ".\n", + EID_ARGS(param.local_eid), + EID_ARGS(param.peer_eid)); + return; + } + + mutex_lock(&vtpn->state_lock); + if (vtpn->state != UBCORE_VTPS_RESET) { + mutex_unlock(&vtpn->state_lock); + ubcore_vtpn_kref_put(vtpn); + return; + } + + ret = ubcore_handle_create_vtp_resp(dev, resp, vtpn); + if (ret != 0) { + ubcore_handle_vtpn_wait_list(vtpn, dev, UBCORE_VTPS_ERROR, + ECONNREFUSED); + ubcore_hash_table_rmv_vtpn(dev, vtpn); + mutex_unlock(&vtpn->state_lock); + ubcore_vtpn_kref_put(vtpn); + (void)ubcore_free_vtpn(vtpn); + } else { + ubcore_handle_vtpn_wait_list(vtpn, dev, UBCORE_VTPS_READY, 0); + mutex_unlock(&vtpn->state_lock); + ubcore_vtpn_kref_put(vtpn); + } +} + +static void ubcore_wait_connect_vtp_resp_timeout_inner( + struct ubcore_vtp_param *param, struct ubcore_device *dev, + struct ubcore_wait_vtpn_resp_work *wait_work) +{ + struct ubcore_vtpn *vtpn; + + if (param == NULL || dev == NULL) { + ubcore_log_err("dev or param is null.\n"); + return; + } + + vtpn = ubcore_find_get_vtpn(dev, param); + if (vtpn == NULL) { + ubcore_log_err("Failed to find vtpn.\n"); + return; + } + + mutex_lock(&vtpn->state_lock); + if (vtpn->state != UBCORE_VTPS_RESET) { + mutex_unlock(&vtpn->state_lock); + ubcore_vtpn_kref_put(vtpn); + return; + } + + ubcore_handle_vtpn_wait_list(vtpn, dev, UBCORE_VTPS_ERROR, ETIMEDOUT); + if (atomic_read(&vtpn->use_cnt) == 0) { + ubcore_hash_table_rmv_vtpn(dev, vtpn); + mutex_unlock(&vtpn->state_lock); + ubcore_vtpn_kref_put(vtpn); + (void)ubcore_free_vtpn(vtpn); + } else { + mutex_unlock(&vtpn->state_lock); + ubcore_vtpn_kref_put(vtpn); + } +} + +static void ubcore_wait_connect_vtp_resp_timeout(struct work_struct *work) +{ + struct ubcore_wait_vtpn_resp_work *wait_work; + struct ubcore_msg_session *s; + + ubcore_log_info("Failed to wait connect vtp resp, time out.\n"); + wait_work = container_of(work, struct ubcore_wait_vtpn_resp_work, + delay_work.work); + + s = ubcore_find_msg_session(wait_work->msg_id); + if (s == NULL) { + ubcore_log_err("Failed to find msg session.\n"); + return; + } + + if (!ubcore_set_session_finish(s)) { + ubcore_log_err("Failed to trylock and modify state.\n"); + (void)kref_put(&s->kref, ubcore_free_msg_session); + return; + } + + ubcore_wait_connect_vtp_resp_timeout_inner(&wait_work->param, + wait_work->dev, wait_work); + kfree(s->req); + s->req = NULL; + (void)kref_put(&s->kref, ubcore_free_msg_session); + ubcore_destroy_msg_session(s); + kfree(wait_work); +} + +static struct ubcore_wait_vtpn_resp_work * +ubcore_queue_wait_connect_vtp_resp_task(struct ubcore_device *dev, + struct ubcore_vtp_param *param, + int timeout) +{ + struct ubcore_wait_vtpn_resp_work *wait_work; + int ret; + + wait_work = + kzalloc(sizeof(struct ubcore_wait_vtpn_resp_work), GFP_KERNEL); + if (wait_work == NULL) + return NULL; + + wait_work->dev = dev; + wait_work->s = NULL; + wait_work->param.trans_mode = param->trans_mode; + wait_work->param.local_eid = param->local_eid; + wait_work->param.peer_eid = param->peer_eid; + wait_work->param.local_jetty = param->local_jetty; + wait_work->param.peer_jetty = param->peer_jetty; + wait_work->param.eid_index = param->eid_index; + wait_work->vtpn = NULL; + wait_work->msg_id = 0; + if (timeout <= 0 || timeout > UBCORE_TYPICAL_TIMEOUT) + wait_work->timeout = UBCORE_TYPICAL_TIMEOUT; + else + wait_work->timeout = timeout; + + INIT_DELAYED_WORK(&wait_work->delay_work, + 
ubcore_wait_connect_vtp_resp_timeout); + ret = ubcore_queue_delayed_work(UBCORE_CONNECT_VTP_ASYNC_WQ, + &wait_work->delay_work, + msecs_to_jiffies(wait_work->timeout)); + if (ret != 0) { + ubcore_log_err("Failed to queue wait connect vtp resp work.\n"); + kfree(wait_work); + return NULL; + } + return wait_work; +} + +static struct ubcore_msg_session * +ubcore_create_async_connect_vtp_req(struct ubcore_device *dev, + struct ubcore_vtp_param *p, + struct ubcore_vtpn *vtpn) +{ + uint32_t data_len = (uint32_t)sizeof(struct ubcore_create_vtp_req); + struct ubcore_create_vtp_req *create; + struct ubcore_req *req; + struct ubcore_msg_session *s; + + req = kzalloc(sizeof(struct ubcore_req) + data_len, GFP_KERNEL); + if (req == NULL) + return NULL; + req->opcode = UBCORE_MSG_CREATE_VTP; + req->len = data_len; + + create = (struct ubcore_create_vtp_req *)req->data; + create->vtpn = vtpn->vtpn; + create->trans_mode = p->trans_mode; + create->local_eid = p->local_eid; + create->peer_eid = p->peer_eid; + create->eid_index = p->eid_index; + create->local_jetty = p->local_jetty; + create->peer_jetty = p->peer_jetty; + (void)strscpy(create->dev_name, dev->dev_name, UBCORE_MAX_DEV_NAME); + create->virtualization = dev->attr.virtualization; + + s = ubcore_create_ue2mue_session(req, vtpn); + if (s == NULL) + kfree(req); + return s; +} + +static int ubcore_handle_del_vtp_resp(struct ubcore_device *dev, + struct ubcore_resp *resp, void *user_arg) +{ + struct ubcore_destroy_vtp_resp *vtp_resp = NULL; + + if (resp == NULL || + resp->len < sizeof(struct ubcore_destroy_vtp_resp)) { + ubcore_log_err("invalid parameter"); + return (int)UBCORE_MSG_RESP_FAIL; + } + + vtp_resp = (struct ubcore_destroy_vtp_resp *)resp->data; + switch (vtp_resp->ret) { + case UBCORE_MSG_RESP_SUCCESS: + break; + case UBCORE_MSG_RESP_FAIL: + ubcore_log_err("failed to destroy vtp: response error"); + break; + case UBCORE_MSG_RESP_IN_PROGRESS: + ubcore_log_err( + "failed: try to del vtp which is being deleted. 
Try again later.\n"); + break; + /* the status of the delete vtp reply is unknown */ + case UBCORE_MSG_RESP_RC_JETTY_ALREADY_BIND: + case UBCORE_MSG_RESP_LIMIT_RATE: + default: + ubcore_log_err("failed: the state of vtp reply to del is:%d.\n", + vtp_resp->ret); + break; + } + + return (int)vtp_resp->ret; +} + +static int ubcore_send_del_vtp_req(struct ubcore_vtpn *vtpn) +{ + uint32_t data_len = (uint32_t)sizeof(struct ubcore_create_vtp_req); + struct ubcore_create_vtp_req *destroy; + struct ubcore_req *req; + + req = kzalloc(sizeof(struct ubcore_req) + data_len, GFP_KERNEL); + if (req == NULL) + return -ENOMEM; + req->opcode = UBCORE_MSG_DESTROY_VTP; + req->len = data_len; + + destroy = (struct ubcore_create_vtp_req *)req->data; + destroy->vtpn = vtpn->vtpn; + destroy->trans_mode = vtpn->trans_mode; + destroy->local_eid = vtpn->local_eid; + destroy->peer_eid = vtpn->peer_eid; + destroy->eid_index = vtpn->eid_index; + destroy->local_jetty = vtpn->local_jetty; + destroy->peer_jetty = vtpn->peer_jetty; + (void)strscpy(destroy->dev_name, vtpn->ub_dev->dev_name, + UBCORE_MAX_DEV_NAME); + destroy->virtualization = vtpn->ub_dev->attr.virtualization; + + kfree(req); + return 0; +} + +static void ubcore_handle_disconnect_vtpn_wait_list(struct ubcore_vtpn *vtpn, + int status, int vtp_state) +{ + struct ubcore_vtpn_wait_cb_node_imp *wait_node, *next; + struct ubcore_vtpn_cb_para *para; + struct ubcore_wait_vtpn_resp_work *wait_work; + struct ubcore_unimport_cb *unimport_cb; + struct ubcore_unbind_cb *unbind_cb; + + vtpn->state = vtp_state; + list_for_each_entry_safe(wait_node, next, &vtpn->disconnect_list, + node) { + ubcore_del_async_disconnect_vtp_work_list(wait_node); + + para = &wait_node->para; + wait_work = wait_node->wait_work; + + if (wait_work != NULL && status != ETIMEDOUT) { + cancel_delayed_work_sync(&wait_work->delay_work); + kfree(wait_work); + } + + if (para->type == UBCORE_UNIMPORT_JETTY_VTPN) { + if (para->unimport_cb != NULL) { + unimport_cb = para->unimport_cb; + unimport_cb->callback(status, + unimport_cb->user_arg); + kfree(para->unimport_cb); + para->unimport_cb = NULL; + } + } else { + if (para->unbind_cb != NULL) { + unbind_cb = para->unbind_cb; + unbind_cb->callback(status, + unbind_cb->user_arg); + kfree(para->unbind_cb); + para->unbind_cb = NULL; + } + } + + list_del(&wait_node->node); + kfree(wait_node); + } +} + +void ubcore_wait_disconnect_vtp_resp_intime(struct ubcore_msg_session *s, + struct ubcore_device *dev, + struct ubcore_resp *resp) +{ + struct ubcore_vtpn *vtpn; + int ret; + + ubcore_log_info("Success to wait disconnect vtp resp intime.\n"); + vtpn = s->vtpn; + if (vtpn == NULL) { + ubcore_log_err("vtpn is null.\n"); + return; + } + + mutex_lock(&vtpn->state_lock); + if (vtpn->state == UBCORE_VTPS_ERROR) { + mutex_unlock(&vtpn->state_lock); + return; + } + + ret = ubcore_handle_del_vtp_resp(dev, resp, vtpn); + if (ret != 0) + ubcore_handle_disconnect_vtpn_wait_list(vtpn, ECONNREFUSED, + UBCORE_VTPS_ERROR); + else + ubcore_handle_disconnect_vtpn_wait_list(vtpn, 0, + UBCORE_VTPS_ERROR); + mutex_unlock(&vtpn->state_lock); + + if (atomic_read(&vtpn->use_cnt) == 0) { + ubcore_log_info("vtpn use_cnt is 0, destroy vtpn, vtpn: %u.\n", + vtpn->vtpn); + if (ret == 0 || ret == -ENOENT || + ubcore_queue_destroy_vtp_task(dev, vtpn, 0) != 0) + (void)ubcore_free_vtpn(vtpn); + } +} + +static void ubcore_wait_disconnect_vtp_resp_timeout_inner( + struct ubcore_vtpn *vtpn, struct ubcore_device *dev, + struct ubcore_wait_vtpn_resp_work *wait_work) +{ + if (vtpn == NULL || dev == 
NULL) { + ubcore_log_err("vtpn or dev is null.\n"); + return; + } + + mutex_lock(&vtpn->state_lock); + if (vtpn->state == UBCORE_VTPS_ERROR) { + mutex_unlock(&vtpn->state_lock); + return; + } + ubcore_handle_disconnect_vtpn_wait_list(vtpn, ETIMEDOUT, + UBCORE_VTPS_ERROR); + mutex_unlock(&vtpn->state_lock); + + if (atomic_read(&vtpn->use_cnt) == 0) { + if (ubcore_queue_destroy_vtp_task(dev, vtpn, 0) != 0) + (void)ubcore_free_vtpn(vtpn); + } +} + +static void ubcore_wait_disconnect_vtp_resp_timeout(struct work_struct *work) +{ + struct ubcore_wait_vtpn_resp_work *wait_work; + struct ubcore_msg_session *s; + + ubcore_log_info("Failed to wait disconnect vtp resp, time out.\n"); + wait_work = container_of(work, struct ubcore_wait_vtpn_resp_work, + delay_work.work); + + s = ubcore_find_msg_session(wait_work->msg_id); + if (s == NULL) { + ubcore_log_err("Failed to find msg session.\n"); + return; + } + + if (!ubcore_set_session_finish(s)) { + ubcore_log_err("Failed to trylock and modify state.\n"); + (void)kref_put(&s->kref, ubcore_free_msg_session); + return; + } + + ubcore_wait_disconnect_vtp_resp_timeout_inner( + wait_work->vtpn, wait_work->dev, wait_work); + kfree(s->req); + s->req = NULL; + (void)kref_put(&s->kref, ubcore_free_msg_session); + ubcore_destroy_msg_session(s); + kfree(wait_work); +} + +static struct ubcore_wait_vtpn_resp_work * +ubcore_queue_wait_disconnect_vtp_resp_task(struct ubcore_device *dev, + struct ubcore_vtpn *vtpn, + int timeout) +{ + struct ubcore_wait_vtpn_resp_work *wait_work; + int ret; + + wait_work = + kzalloc(sizeof(struct ubcore_wait_vtpn_resp_work), GFP_KERNEL); + if (wait_work == NULL) + return NULL; + + wait_work->dev = dev; + wait_work->s = NULL; + wait_work->param.trans_mode = vtpn->trans_mode; + wait_work->param.local_eid = vtpn->local_eid; + wait_work->param.peer_eid = vtpn->peer_eid; + wait_work->param.local_jetty = vtpn->local_jetty; + wait_work->param.peer_jetty = vtpn->peer_jetty; + wait_work->param.eid_index = vtpn->eid_index; + wait_work->vtpn = vtpn; + wait_work->msg_id = 0; + if (timeout <= 0 || timeout > UBCORE_DESTROY_TIMEOUT) + wait_work->timeout = UBCORE_DESTROY_TIMEOUT; + else + wait_work->timeout = timeout; + + INIT_DELAYED_WORK(&wait_work->delay_work, + ubcore_wait_disconnect_vtp_resp_timeout); + ret = ubcore_queue_delayed_work(UBCORE_DISCONNECT_VTP_ASYNC_WQ, + &wait_work->delay_work, + msecs_to_jiffies(wait_work->timeout)); + if (ret != 0) { + ubcore_log_err( + "Failed to queue wait disconnect vtp resp work.\n"); + kfree(wait_work); + return NULL; + } + return wait_work; +} + +static struct ubcore_msg_session * +ubcore_create_async_disconnect_vtp_req(struct ubcore_vtpn *vtpn) +{ + uint32_t data_len = (uint32_t)sizeof(struct ubcore_create_vtp_req); + struct ubcore_create_vtp_req *destroy; + struct ubcore_req *req; + struct ubcore_msg_session *s; + + req = kzalloc(sizeof(struct ubcore_req) + data_len, GFP_KERNEL); + if (req == NULL) + return NULL; + req->opcode = UBCORE_MSG_DESTROY_VTP; + req->len = data_len; + + destroy = (struct ubcore_create_vtp_req *)req->data; + destroy->vtpn = vtpn->vtpn; + destroy->trans_mode = vtpn->trans_mode; + destroy->local_eid = vtpn->local_eid; + destroy->peer_eid = vtpn->peer_eid; + destroy->eid_index = vtpn->eid_index; + destroy->local_jetty = vtpn->local_jetty; + destroy->peer_jetty = vtpn->peer_jetty; + (void)strscpy(destroy->dev_name, vtpn->ub_dev->dev_name, + UBCORE_MAX_DEV_NAME); + destroy->virtualization = vtpn->ub_dev->attr.virtualization; + + s = ubcore_create_ue2mue_session(req, vtpn); + if (s == 
NULL) + kfree(req); + return s; +} + +static struct ubcore_vtpn *ubcore_alloc_vtpn(struct ubcore_device *dev, + struct ubcore_vtp_param *param) +{ + struct ubcore_vtpn *vtpn; + + if (dev->ops == NULL || dev->ops->alloc_vtpn == NULL) + return ERR_PTR(-EINVAL); + + vtpn = dev->ops->alloc_vtpn(dev); + if (IS_ERR_OR_NULL(vtpn)) { + ubcore_log_err("failed to alloc vtpn!, dev_name:%s", + dev->dev_name); + return UBCORE_CHECK_RETURN_ERR_PTR(vtpn, ENOEXEC); + } + + vtpn->ub_dev = dev; + atomic_set(&vtpn->use_cnt, 0); + kref_init(&vtpn->ref_cnt); + init_completion(&vtpn->comp); + vtpn->trans_mode = param->trans_mode; + vtpn->local_eid = param->local_eid; + vtpn->peer_eid = param->peer_eid; + vtpn->eid_index = param->eid_index; + vtpn->local_jetty = param->local_jetty; + vtpn->peer_jetty = param->peer_jetty; + vtpn->state = UBCORE_VTPS_RESET; + mutex_init(&vtpn->state_lock); + INIT_LIST_HEAD(&vtpn->list); + INIT_LIST_HEAD(&vtpn->disconnect_list); + return vtpn; +} + +static struct ubcore_vtpn * + ubcore_create_vtpn(struct ubcore_device *dev, + struct ubcore_vtp_param *param, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata) +{ + struct ubcore_vtpn *vtpn; + + vtpn = kcalloc(1, sizeof(struct ubcore_vtpn), GFP_KERNEL); + if (IS_ERR_OR_NULL(vtpn)) + return NULL; + + vtpn->ub_dev = dev; + atomic_set(&vtpn->use_cnt, 0); + kref_init(&vtpn->ref_cnt); + init_completion(&vtpn->comp); + vtpn->trans_mode = param->trans_mode; + vtpn->local_eid = param->local_eid; + vtpn->peer_eid = param->peer_eid; + vtpn->eid_index = param->eid_index; + vtpn->local_jetty = param->local_jetty; + vtpn->peer_jetty = param->peer_jetty; + vtpn->state = UBCORE_VTPS_RESET; + mutex_init(&vtpn->state_lock); + INIT_LIST_HEAD(&vtpn->list); + INIT_LIST_HEAD(&vtpn->disconnect_list); + if (active_tp_cfg != NULL) { + vtpn->vtpn = (uint32_t)active_tp_cfg->tp_handle.bs.tpid; + vtpn->tp_handle = active_tp_cfg->tp_handle.value; + vtpn->peer_tp_handle = active_tp_cfg->peer_tp_handle.value; + vtpn->tag = active_tp_cfg->tag; + } + if (udata != NULL) + vtpn->uspace = true; + + return vtpn; +} + +static void ubcore_vtpn_kref_release(struct kref *ref_cnt) +{ + struct ubcore_vtpn *vtpn = + container_of(ref_cnt, struct ubcore_vtpn, ref_cnt); + + complete(&vtpn->comp); +} + +void ubcore_vtpn_kref_put(struct ubcore_vtpn *vtpn) +{ + (void)kref_put(&vtpn->ref_cnt, ubcore_vtpn_kref_release); +} + +void ubcore_vtpn_get(void *obj) +{ + struct ubcore_vtpn *vtpn = obj; + + kref_get(&vtpn->ref_cnt); +} + +static void ubcore_vtp_unmap_attr(struct ubcore_vtp_cfg *cfg) +{ + if (cfg->flag.bs.clan_tp) { + atomic_dec(&cfg->ctp->use_cnt); + return; + } + if (cfg->trans_mode != UBCORE_TP_UM) + ubcore_tpg_kref_put(cfg->tpg); + else + ubcore_utp_kref_put(cfg->utp); +} + +static void ubcore_vtp_kref_release(struct kref *ref_cnt) +{ + struct ubcore_vtp *vtp = + container_of(ref_cnt, struct ubcore_vtp, ref_cnt); + struct ubcore_device *ub_dev = vtp->ub_dev; + struct ubcore_vtp_cfg cfg = vtp->cfg; + + /* pseudo vtp */ + if (vtp->cfg.vtpn == UINT_MAX) { + ubcore_vtp_unmap_attr(&cfg); + kfree(vtp); + return; + } + if (ub_dev == NULL || ub_dev->ops == NULL || + ub_dev->ops->destroy_vtp == NULL) + return; + ub_dev->ops->destroy_vtp(vtp); + ubcore_vtp_unmap_attr(&cfg); +} + +void ubcore_vtp_kref_put(struct ubcore_vtp *vtp) +{ + ubcore_log_debug("put vtpn:%u, dev:%s,, refcnt to: %u", vtp->cfg.vtpn, + vtp->ub_dev->dev_name, kref_read(&vtp->ref_cnt) - 1); + (void)kref_put(&vtp->ref_cnt, ubcore_vtp_kref_release); +} + +void ubcore_vtp_get(void *obj) +{ + struct 
ubcore_vtp *vtp = obj; + + ubcore_log_debug("get vtpn:%u, dev:%s,, refcnt to: %u", vtp->cfg.vtpn, + vtp->ub_dev->dev_name, kref_read(&vtp->ref_cnt) + 1); + kref_get(&vtp->ref_cnt); +} + +static int ubcore_free_vtpn(struct ubcore_vtpn *vtpn) +{ + struct ubcore_device *dev = vtpn->ub_dev; + + if (dev == NULL || dev->ops == NULL || dev->ops->free_vtpn == NULL) { + ubcore_log_err( + "dev == NULL || dev->ops == NULL || dev->ops->free_vtpn == NULL"); + return -EINVAL; + } + + if (atomic_read(&vtpn->use_cnt) > 0) { + ubcore_log_info("vtpn in use, vtpn id = %u, vtpn use_cnt = %d", + vtpn->vtpn, atomic_read(&vtpn->use_cnt)); + return 0; + } + ubcore_vtpn_kref_put(vtpn); + wait_for_completion(&vtpn->comp); + mutex_destroy(&vtpn->state_lock); + + if (vtpn->tp_handle != 0) { + kfree(vtpn); + return 0; + } + return dev->ops->free_vtpn(vtpn); +} + +static int ubcore_free_vtpn_ctrlplane(struct ubcore_vtpn *vtpn) +{ + if (atomic_read(&vtpn->use_cnt) > 0) { + ubcore_log_info("vtpn in use, vtpn id = %u, vtpn use_cnt = %d", + vtpn->vtpn, atomic_read(&vtpn->use_cnt)); + return 0; + } + ubcore_vtpn_kref_put(vtpn); + wait_for_completion(&vtpn->comp); + mutex_destroy(&vtpn->state_lock); + + if (vtpn->tp_handle == 0) + ubcore_log_err("Invalid tp_handle.\n"); + + kfree(vtpn); + return 0; +} + +static struct ubcore_hash_table * +ubcore_get_vtpn_ht(struct ubcore_device *dev, + enum ubcore_transport_mode trans_mode) +{ + if (trans_mode == UBCORE_TP_RM) + return &dev->ht[UBCORE_HT_RM_VTPN]; + + if (trans_mode == UBCORE_TP_RC) + return &dev->ht[UBCORE_HT_RC_VTPN]; + + if (trans_mode == UBCORE_TP_UM) + return &dev->ht[UBCORE_HT_UM_VTPN]; + + return NULL; +} + +static struct ubcore_vtpn *ubcore_find_get_vtpn(struct ubcore_device *dev, + struct ubcore_vtp_param *param) +{ + struct ubcore_hash_table *ht; + uint32_t hash; + + ht = ubcore_get_vtpn_ht(dev, param->trans_mode); + if (ht == NULL) + return NULL; + + hash = ubcore_get_vtpn_hash(ht, ¶m->local_eid); + return ubcore_hash_table_lookup_get(ht, hash, ¶m->local_eid); +} + +static int ubcore_find_add_vtpn(struct ubcore_device *dev, + struct ubcore_vtpn *new_vtpn, + struct ubcore_vtpn **exist_vtpn, + struct ubcore_vtp_param *p) +{ + struct ubcore_hash_table *ht; + uint32_t hash; + + ht = ubcore_get_vtpn_ht(dev, new_vtpn->trans_mode); + if (ht == NULL || ht->head == NULL) { + ubcore_log_err("hash table's head equals NULL"); + return -EINVAL; + } + hash = ubcore_get_vtpn_hash(ht, &new_vtpn->local_eid); + + spin_lock(&ht->lock); + *exist_vtpn = ubcore_hash_table_lookup_nolock_get(ht, hash, + &new_vtpn->local_eid); + if (*exist_vtpn != NULL) { + spin_unlock(&ht->lock); + return -EEXIST; + } + ubcore_hash_table_add_nolock(ht, &new_vtpn->hnode, hash); + spin_unlock(&ht->lock); + return 0; +} + +static void ubcore_hash_table_rmv_vtpn(struct ubcore_device *dev, + struct ubcore_vtpn *vtpn) +{ + struct ubcore_hash_table *ht; + + ht = ubcore_get_vtpn_ht(dev, vtpn->trans_mode); + if (ht == NULL) + return; + ubcore_hash_table_remove(ht, &vtpn->hnode); +} + +static struct ubcore_vtpn *ubcore_reuse_vtpn(struct ubcore_device *dev, + struct ubcore_vtpn *vtpn) +{ + int i = 0; + + mutex_lock(&vtpn->state_lock); + if (vtpn->state == UBCORE_VTPS_READY) { + atomic_inc(&vtpn->use_cnt); + mutex_unlock(&vtpn->state_lock); + ubcore_log_info("Success to reuse vtpn:%u", vtpn->vtpn); + ubcore_vtpn_kref_put(vtpn); + return vtpn; + } + + for (i = 0; i < UBCORE_SYNC_VTPN_MAX_WAIT_TIMES; i++) { + if (vtpn->state == UBCORE_VTPS_READY) { + atomic_inc(&vtpn->use_cnt); + mutex_unlock(&vtpn->state_lock); + 
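+			/*
+			 * Timing note (derived from the constants above, added for
+			 * clarity): a vtpn still in UBCORE_VTPS_RESET is re-checked
+			 * every UBCORE_SYNC_REUSE_VTPN_ONE_TIME (20) ms for at most
+			 * UBCORE_SYNC_VTPN_MAX_WAIT_TIMES (30 * 1000 / 20 = 1500)
+			 * rounds, i.e. roughly 30 seconds, before reuse is given up.
+			 * On success the caller now owns a use_cnt reference, so the
+			 * lookup reference taken by ubcore_find_get_vtpn() is dropped
+			 * right below.
+			 */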
ubcore_log_info("Success to reuse vtpn:%u", vtpn->vtpn); + ubcore_vtpn_kref_put(vtpn); + return vtpn; + } else if (vtpn->state == UBCORE_VTPS_RESET) { + mutex_unlock(&vtpn->state_lock); + msleep(UBCORE_SYNC_REUSE_VTPN_ONE_TIME); + mutex_lock(&vtpn->state_lock); + } else if (vtpn->state == UBCORE_VTPS_WAIT_DESTROY) { + break; + } + } + ubcore_log_warn("failed to reuse vtpn:%u, use_cnt:%d", vtpn->vtpn, + atomic_read(&vtpn->use_cnt)); + mutex_unlock(&vtpn->state_lock); + ubcore_vtpn_kref_put(vtpn); + return NULL; +} + +struct ubcore_vtpn *ubcore_connect_vtp(struct ubcore_device *dev, + struct ubcore_vtp_param *param) +{ + struct ubcore_vtpn *exist_vtpn = NULL; + struct ubcore_vtpn *vtpn; + int ret; + + if (dev == NULL || param == NULL) { + ubcore_log_err("Invalid param"); + return ERR_PTR(-EINVAL); + } + + if (ubcore_check_port_state(dev) != 0) { + ubcore_log_err("Check port status Failed"); + return NULL; + } + + // 1. try to reuse vtpn + vtpn = ubcore_find_get_vtpn(dev, param); + if (vtpn != NULL) + return ubcore_reuse_vtpn(dev, vtpn); + + // 2. alloc new vtpn + vtpn = ubcore_alloc_vtpn(dev, param); + if (IS_ERR_OR_NULL(vtpn)) { + ubcore_log_err("failed to alloc vtpn!"); + return vtpn; + } + + // 3. add vtpn to hashtable + ret = ubcore_find_add_vtpn(dev, vtpn, &exist_vtpn, param); + if (ret == -EEXIST && exist_vtpn != NULL) { + exist_vtpn = + ubcore_reuse_vtpn(dev, exist_vtpn); // reuse immediately + (void)ubcore_free_vtpn(vtpn); + return exist_vtpn; + } else if (ret != 0) { + (void)ubcore_free_vtpn(vtpn); + return NULL; + } + + // 4. Send connecting msg + mutex_lock(&vtpn->state_lock); + ret = ubcore_send_create_vtp_req(dev, param, vtpn); + if (ret == 0) { + atomic_inc(&vtpn->use_cnt); + vtpn->state = UBCORE_VTPS_READY; + } else { + vtpn->state = UBCORE_VTPS_WAIT_DESTROY; + } + mutex_unlock(&vtpn->state_lock); + + // 4. 
failed roll back + if (ret != 0) { + ubcore_log_err("failed to send create vtp req, vtpn:%u", + vtpn->vtpn); + ubcore_hash_table_rmv_vtpn(dev, vtpn); + (void)ubcore_free_vtpn(vtpn); + return ERR_PTR(ret); + } + + ubcore_log_info("connect vtpn:%u, trans_mode:%u", vtpn->vtpn, + vtpn->trans_mode); + return vtpn; +} + +static int ubcore_active_tp(struct ubcore_device *dev, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_vtpn *vtpn) +{ + int ret; + + if (dev->ops == NULL || dev->ops->active_tp == NULL || + dev->ops->deactive_tp == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + ret = dev->ops->active_tp(dev, active_tp_cfg); + if (ret != 0) { + ubcore_log_err("Failed to active tp, ret: %d, dev_name: %s.\n", + ret, dev->dev_name); + return ret; + } + + vtpn->vtpn = (uint32_t)active_tp_cfg->tp_handle.bs.tpid; + return 0; +} + +struct ubcore_vtpn * +ubcore_find_get_vtpn_ctrlplane(struct ubcore_device *dev, + struct ubcore_active_tp_cfg *active_tp_cfg) +{ + struct ubcore_hash_table *ht = &dev->ht[UBCORE_HT_CP_VTPN]; + uint32_t hash; + + hash = ubcore_get_vtpn_hash(ht, &active_tp_cfg->tp_handle.value); + return ubcore_hash_table_lookup_get(ht, hash, + &active_tp_cfg->tp_handle.value); +} + +static int ubcore_find_add_vtpn_ctrlplane(struct ubcore_device *dev, + struct ubcore_vtpn *new_vtpn, + struct ubcore_vtpn **exist_vtpn) +{ + struct ubcore_hash_table *ht = &dev->ht[UBCORE_HT_CP_VTPN]; + uint32_t hash; + + hash = ubcore_get_vtpn_hash(ht, &new_vtpn->tp_handle); + + spin_lock(&ht->lock); + *exist_vtpn = ubcore_hash_table_lookup_nolock_get(ht, hash, + &new_vtpn->tp_handle); + if (*exist_vtpn != NULL) { + spin_unlock(&ht->lock); + return -EEXIST; + } + ubcore_hash_table_add_nolock(ht, &new_vtpn->hnode, hash); + spin_unlock(&ht->lock); + return 0; +} + +static void ubcore_hash_table_rmv_vtpn_ctrlplane(struct ubcore_device *dev, + struct ubcore_vtpn *vtpn) +{ + struct ubcore_hash_table *ht = &dev->ht[UBCORE_HT_CP_VTPN]; + + ubcore_hash_table_remove(ht, &vtpn->hnode); +} + +struct ubcore_vtpn * + ubcore_connect_vtp_ctrlplane(struct ubcore_device *dev, + struct ubcore_vtp_param *param, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata) +{ + struct ubcore_vtpn *exist_vtpn = NULL; + struct ubcore_vtpn *vtpn; + int ret; + + if (ubcore_check_port_state(dev) != 0) { + ubcore_log_err("Check port status Failed.\n"); + return NULL; + } + + // 1. try to reuse vtpn + vtpn = ubcore_find_get_vtpn_ctrlplane(dev, active_tp_cfg); + if (vtpn != NULL) + return ubcore_reuse_vtpn(dev, vtpn); + + // 2. alloc new vtpn + vtpn = ubcore_create_vtpn(dev, param, active_tp_cfg, udata); + if (IS_ERR_OR_NULL(vtpn)) { + ubcore_log_err("failed to alloc vtpn.\n"); + return vtpn; + } + + // 3. add vtpn to hashtable + ret = ubcore_find_add_vtpn_ctrlplane(dev, vtpn, &exist_vtpn); + if (ret == -EEXIST && exist_vtpn != NULL) { + exist_vtpn = + ubcore_reuse_vtpn(dev, exist_vtpn); // reuse immediately + (void)ubcore_free_vtpn_ctrlplane(vtpn); + return exist_vtpn; + } else if (ret != 0) { + (void)ubcore_free_vtpn_ctrlplane(vtpn); + return NULL; + } + + // 4. active tp + mutex_lock(&vtpn->state_lock); + ret = ubcore_active_tp(dev, active_tp_cfg, vtpn); + if (ret == 0) { + atomic_inc(&vtpn->use_cnt); + vtpn->state = UBCORE_VTPS_READY; + } else { + vtpn->state = UBCORE_VTPS_WAIT_DESTROY; + } + mutex_unlock(&vtpn->state_lock); + + // 5. 
failed roll back + if (ret != 0) { + ubcore_log_err("failed to active tp, vtpn:%u", vtpn->vtpn); + ubcore_hash_table_rmv_vtpn_ctrlplane(dev, vtpn); + (void)ubcore_free_vtpn_ctrlplane(vtpn); + return ERR_PTR(ret); + } + + ubcore_log_info("connect vtpn:%u, trans_mode:%u, tp_handle: %llu.\n", + vtpn->vtpn, vtpn->trans_mode, + active_tp_cfg->tp_handle.value); + return vtpn; +} + +static struct ubcore_vtpn * +ubcore_reuse_vtpn_async(struct ubcore_device *dev, struct ubcore_vtpn *vtpn, + struct ubcore_vtpn_cb_para *para) +{ + mutex_lock(&vtpn->state_lock); + if (vtpn->state == UBCORE_VTPS_READY) { + atomic_inc(&vtpn->use_cnt); + para->tjetty->vtpn = vtpn; + + if (para->type == UBCORE_IMPORT_JETTY_VTPN) { + para->import_cb->callback(para->tjetty, 0, + para->import_cb->user_arg); + kfree(para->import_cb); + para->import_cb = NULL; + } + if (para->type == UBCORE_BIND_JETTY_VTPN) { + para->bind_cb->callback(para->jetty, para->tjetty, 0, + para->bind_cb->user_arg); + kfree(para->bind_cb); + para->bind_cb = NULL; + } + + mutex_unlock(&vtpn->state_lock); + ubcore_log_info("Success to reuse ready vtpn:%u", vtpn->vtpn); + ubcore_vtpn_kref_put(vtpn); + return vtpn; + } + + if (vtpn->state == UBCORE_VTPS_RESET) { + atomic_inc(&vtpn->use_cnt); + ubcore_add_async_wait_list(vtpn, para, NULL); + mutex_unlock(&vtpn->state_lock); + + ubcore_log_info("Success to reuse reset vtpn:%u, use_cnt:%d", + vtpn->vtpn, atomic_read(&vtpn->use_cnt)); + ubcore_vtpn_kref_put(vtpn); + return vtpn; + } + + ubcore_log_err("Unknown states, vtpn:%u, state:%d", vtpn->vtpn, + (int)vtpn->state); + mutex_unlock(&vtpn->state_lock); + + ubcore_vtpn_kref_put(vtpn); + return NULL; +} + +struct ubcore_vtpn *ubcore_connect_vtp_async(struct ubcore_device *dev, + struct ubcore_vtp_param *param, + int timeout, + struct ubcore_vtpn_cb_para *para) +{ + struct ubcore_vtpn *exist_vtpn = NULL; + struct ubcore_vtpn *vtpn; + struct ubcore_msg_session *s; + struct ubcore_wait_vtpn_resp_work *wait_work; + int ret; + + if (dev == NULL || param == NULL || para == NULL) { + ubcore_log_err("Invalid param"); + return ERR_PTR(-EINVAL); + } + + if (ubcore_check_port_state(dev) != 0) { + ubcore_log_err("Check port status Failed"); + return NULL; + } + + // 1. try to reuse vtpn + vtpn = ubcore_find_get_vtpn(dev, param); + if (vtpn != NULL) + return ubcore_reuse_vtpn_async(dev, vtpn, para); + + // 2. alloc new vtpn + vtpn = ubcore_alloc_vtpn(dev, param); + if (IS_ERR_OR_NULL(vtpn)) { + ubcore_log_err("failed to alloc vtpn!"); + return vtpn; + } + + // 3. add vtpn to hashtable + ret = ubcore_find_add_vtpn(dev, vtpn, &exist_vtpn, param); + if (ret == -EEXIST && exist_vtpn != NULL) { + exist_vtpn = ubcore_reuse_vtpn_async(dev, exist_vtpn, + para); // reuse immediately + (void)ubcore_free_vtpn(vtpn); + return exist_vtpn; + } else if (ret != 0) { + (void)ubcore_free_vtpn(vtpn); + return NULL; + } + + // 4. 
Send connecting msg and do not wait resp + mutex_lock(&vtpn->state_lock); + atomic_inc(&vtpn->use_cnt); + wait_work = + ubcore_queue_wait_connect_vtp_resp_task(dev, param, timeout); + if (wait_work == NULL) { + ubcore_log_err( + "failed to queue wait connect vtp resp task, vtpn:%u", + vtpn->vtpn); + goto ERR_WQ_WORK; + } + + s = ubcore_create_async_connect_vtp_req(dev, param, vtpn); + if (s == NULL) { + ubcore_log_err( + "failed to create connect vtp req and session, vtpn:%u", + vtpn->vtpn); + goto ERR_SESSION; + } + wait_work->s = s; + wait_work->msg_id = s->req->msg_id; + ubcore_add_async_wait_list(vtpn, para, wait_work); + mutex_unlock(&vtpn->state_lock); + + ret = ubcore_send_req(dev, s->req); + if (ret != 0) { + ubcore_log_err( + "Failed to send req, msg_id = %u, opcode = %u.\n", + s->req->msg_id, (uint16_t)s->req->opcode); + if (!ubcore_set_session_finish(s)) { + ubcore_log_err( + "Failed to lock session or session state is finish, msg_id = %u, opcode = %u.\n", + s->req->msg_id, (uint16_t)s->req->opcode); + (void)kref_put(&s->kref, ubcore_free_msg_session); + return vtpn; + } + kfree(s->req); + (void)kref_put(&s->kref, ubcore_free_msg_session); + ubcore_destroy_msg_session(s); + + mutex_lock(&vtpn->state_lock); + ubcore_del_async_wait_list(vtpn); + goto ERR_SESSION; + } + + ubcore_log_info("connect asynchronously vtpn:%u, trans_mode:%u", + vtpn->vtpn, vtpn->trans_mode); + (void)kref_put(&s->kref, ubcore_free_msg_session); + return vtpn; + +ERR_SESSION: + cancel_delayed_work_sync(&wait_work->delay_work); + if (wait_work != NULL) + kfree(wait_work); +ERR_WQ_WORK: + atomic_dec(&vtpn->use_cnt); + vtpn->state = UBCORE_VTPS_WAIT_DESTROY; + ubcore_hash_table_rmv_vtpn(dev, vtpn); + mutex_unlock(&vtpn->state_lock); + (void)ubcore_free_vtpn(vtpn); + return NULL; +} + +int ubcore_disconnect_vtp(struct ubcore_vtpn *vtpn) +{ + struct ubcore_device *dev; + uint64_t tp_handle; + int ret = 0; + + if (vtpn == NULL || vtpn->ub_dev == NULL) + return -EINVAL; + + tp_handle = vtpn->tp_handle; + dev = vtpn->ub_dev; + mutex_lock(&vtpn->state_lock); + if (atomic_dec_return(&vtpn->use_cnt) > 0) { + ubcore_log_info("vtpn in use, vtpn id = %u, vtpn use_cnt = %d", + vtpn->vtpn, atomic_read(&vtpn->use_cnt)); + mutex_unlock(&vtpn->state_lock); + return 0; + } + if (tp_handle == 0) + ubcore_hash_table_rmv_vtpn(dev, vtpn); + else + ubcore_hash_table_rmv_vtpn_ctrlplane(dev, vtpn); + + if (atomic_read(&vtpn->use_cnt) > 0) { + mutex_unlock(&vtpn->state_lock); + return 0; + } + + if (vtpn->state == UBCORE_VTPS_READY) { + if (tp_handle != 0) + ret = ubcore_adapter_layer_disconnect(vtpn); + else + ret = ubcore_send_del_vtp_req(vtpn); + vtpn->state = UBCORE_VTPS_WAIT_DESTROY; + } else { + ubcore_log_info("vtp in deleted state, vtpn:%u, state%u", + vtpn->vtpn, vtpn->state); + } + ubcore_log_info_rl("disconnect vtpn:%u, ret:%d, vtp_state:%u", + vtpn->vtpn, ret, vtpn->state); + mutex_unlock(&vtpn->state_lock); + + if (atomic_read(&vtpn->use_cnt) == 0) { + if (ret == 0 || ret == -ENOENT || + (vtpn->tp_handle == 0 && + ubcore_queue_destroy_vtp_task(dev, vtpn, 0) != 0)) { + if (tp_handle != 0) + (void)ubcore_free_vtpn_ctrlplane(vtpn); + else + (void)ubcore_free_vtpn(vtpn); + } + } + + return (tp_handle != 0) ? 
ret : 0; +} + +int ubcore_disconnect_vtp_async(struct ubcore_vtpn *vtpn, int timeout, + struct ubcore_vtpn_cb_para *para) +{ + struct ubcore_device *dev; + struct ubcore_unimport_cb *unimport_cb; + struct ubcore_unbind_cb *unbind_cb; + struct ubcore_msg_session *s; + struct ubcore_wait_vtpn_resp_work *wait_work; + int ret; + + if (vtpn == NULL || vtpn->ub_dev == NULL || para == NULL) + return -EINVAL; + + dev = vtpn->ub_dev; + mutex_lock(&vtpn->state_lock); + if (atomic_dec_return(&vtpn->use_cnt) > 0) { + ubcore_log_info("vtpn in use, vtpn id = %u, vtpn use_cnt = %d", + vtpn->vtpn, atomic_read(&vtpn->use_cnt)); + + if (para->unimport_cb != NULL) { + unimport_cb = para->unimport_cb; + unimport_cb->callback(0, unimport_cb->user_arg); + kfree(unimport_cb); + } + + if (para->unbind_cb != NULL) { + unbind_cb = para->unbind_cb; + unbind_cb->callback(0, unbind_cb->user_arg); + kfree(unbind_cb); + } + mutex_unlock(&vtpn->state_lock); + return 0; + } + + if (atomic_read(&vtpn->use_cnt) > 0) { + mutex_unlock(&vtpn->state_lock); + return 0; + } + + ubcore_hash_table_rmv_vtpn(dev, vtpn); + if (vtpn->state == UBCORE_VTPS_READY) { + vtpn->state = UBCORE_VTPS_WAIT_DESTROY; + wait_work = ubcore_queue_wait_disconnect_vtp_resp_task( + dev, vtpn, timeout); + if (wait_work == NULL) { + ubcore_log_err( + "failed to queue wait disconnect vtp resp task, vtpn:%u", + vtpn->vtpn); + goto ERR_WQ_WORK; + } + + s = ubcore_create_async_disconnect_vtp_req(vtpn); + if (s == NULL) { + ubcore_log_err( + "failed to create disconnect vtp req and session, vtpn:%u", + vtpn->vtpn); + goto ERR_SESSION; + } + wait_work->s = s; + wait_work->msg_id = s->req->msg_id; + ubcore_add_async_wait_list(vtpn, para, wait_work); + mutex_unlock(&vtpn->state_lock); + + ret = ubcore_send_req(dev, s->req); + if (ret != 0) { + ubcore_log_err( + "Failed to send req, msg_id = %u, opcode = %u.\n", + s->req->msg_id, (uint16_t)s->req->opcode); + if (!ubcore_set_session_finish(s)) { + ubcore_log_err( + "Failed to trylock_and_modify_state, msg_id = %u, opcode = %u.\n", + s->req->msg_id, + (uint16_t)s->req->opcode); + (void)kref_put(&s->kref, + ubcore_free_msg_session); + return 0; + } + kfree(s->req); + (void)kref_put(&s->kref, ubcore_free_msg_session); + ubcore_destroy_msg_session(s); + + mutex_lock(&vtpn->state_lock); + ubcore_del_async_wait_list(vtpn); + goto ERR_SESSION; + } + + (void)kref_put(&s->kref, ubcore_free_msg_session); + } else { + ubcore_log_info("vtp in deleted state, vtpn:%u, state%u", + vtpn->vtpn, vtpn->state); + mutex_unlock(&vtpn->state_lock); + } + + return 0; + +ERR_SESSION: + cancel_delayed_work_sync(&wait_work->delay_work); + if (wait_work != NULL) + kfree(wait_work); +ERR_WQ_WORK: + mutex_unlock(&vtpn->state_lock); + if (atomic_read(&vtpn->use_cnt) == 0) { + if (ubcore_queue_destroy_vtp_task(dev, vtpn, 0) != 0) + (void)ubcore_free_vtpn(vtpn); + } + return 0; +} + +int ubcore_process_vtp_status_nofity(struct ubcore_device *dev, + struct ubcore_vtp_status_notify *msg) +{ + struct ubcore_vtp_param param; + struct ubcore_vtpn *vtpn; + + if (msg->status != UBCORE_VTPS_DELETED) + return 0; + + ubcore_log_info("notify vtpn:%u, trans_mode:%u, seid :" EID_FMT + ", deid:" EID_FMT " ", + msg->vtpn, msg->trans_mode, EID_ARGS(msg->local_eid), + EID_ARGS(msg->peer_eid)); + + memset(¶m, 0, sizeof(param)); + param.trans_mode = msg->trans_mode; + param.local_eid = msg->local_eid; + param.local_jetty = msg->local_jetty_id; + param.peer_eid = msg->peer_eid; + param.peer_jetty = msg->peer_jetty_id; + + vtpn = ubcore_find_get_vtpn(dev, ¶m); + if 
(!vtpn) + return 0; + + mutex_lock(&vtpn->state_lock); + if (vtpn->vtpn != msg->vtpn) { + ubcore_log_warn("ubcore vtpn:%u, msg vtpn%u not equal", + vtpn->vtpn, msg->vtpn); + mutex_unlock(&vtpn->state_lock); + ubcore_vtpn_kref_put(vtpn); + return 0; + } + + ubcore_handle_vtpn_wait_list(vtpn, dev, UBCORE_VTPS_ERROR, + ECONNREFUSED); + mutex_unlock(&vtpn->state_lock); + + ubcore_hash_table_rmv_vtpn(dev, vtpn); + + ubcore_log_info("vtpn:%u, trans to delete state", vtpn->vtpn); + ubcore_vtpn_kref_put(vtpn); + return 0; +} + +static int ubcore_find_add_vtp(struct ubcore_device *dev, + enum ubcore_transport_mode mode, + struct ubcore_vtp *vtp) +{ + struct ubcore_hash_table *ht = NULL; + uint32_t hash; + + switch (mode) { + case UBCORE_TP_RM: + ht = &dev->ht[UBCORE_HT_RM_VTP]; + hash = ubcore_get_vtp_hash(&vtp->cfg.local_eid); + break; + case UBCORE_TP_RC: + ht = &dev->ht[UBCORE_HT_RC_VTP]; + hash = ubcore_get_rc_vtp_hash(&vtp->cfg.peer_eid); + break; + case UBCORE_TP_UM: + ht = &dev->ht[UBCORE_HT_UM_VTP]; + hash = ubcore_get_vtp_hash(&vtp->cfg.local_eid); + break; + default: + ubcore_log_err("unknown mode"); + return -EINVAL; + } + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return -1; + } + /* Old entry with the same key exists */ + if (ubcore_hash_table_lookup_nolock( + ht, hash, ubcore_ht_key(ht, &vtp->hnode)) != NULL) { + spin_unlock(&ht->lock); + ubcore_log_warn("find vtp vtpn:%u hash :%u", vtp->cfg.vtpn, + hash); + return -1; + } + ubcore_hash_table_add_nolock(ht, &vtp->hnode, hash); + ubcore_vtp_get(vtp); + spin_unlock(&ht->lock); + + return 0; +} + +static void ubcore_vtp_map_attr(struct ubcore_vtp *vtp, + struct ubcore_vtp_cfg *cfg) +{ + vtp->cfg.ue_idx = cfg->ue_idx; + vtp->cfg.local_jetty = cfg->local_jetty; + vtp->cfg.local_eid = cfg->local_eid; + vtp->cfg.peer_eid = cfg->peer_eid; + vtp->cfg.peer_jetty = cfg->peer_jetty; + vtp->cfg.flag = cfg->flag; + vtp->cfg.trans_mode = cfg->trans_mode; + + if (cfg->flag.bs.clan_tp) { + vtp->cfg.ctp = cfg->ctp; + atomic_inc(&cfg->ctp->use_cnt); + return; + } + if (cfg->trans_mode != UBCORE_TP_UM) { + vtp->cfg.tpg = cfg->tpg; + ubcore_tpg_get(cfg->tpg); + } else { + vtp->cfg.utp = cfg->utp; + ubcore_utp_get(cfg->utp); + } +} + +struct ubcore_vtp *ubcore_create_and_map_vtp(struct ubcore_device *dev, + struct ubcore_vtp_cfg *cfg) +{ + struct ubcore_vtp *vtp; + int ret; + + if (dev->ops == NULL || dev->ops->create_vtp == NULL) + return ERR_PTR(-EINVAL); + + vtp = dev->ops->create_vtp(dev, cfg, NULL); + if (IS_ERR_OR_NULL(vtp)) { + ubcore_log_err("Failed to create vtp"); + if (vtp == NULL) + return ERR_PTR(-ENOEXEC); + return vtp; + } + kref_init(&vtp->ref_cnt); + vtp->ub_dev = dev; + ubcore_vtp_map_attr(vtp, cfg); + + ret = ubcore_find_add_vtp(dev, cfg->trans_mode, vtp); + if (ret != 0) { + ubcore_vtp_kref_put(vtp); + vtp = NULL; + ubcore_log_err("Failed to add vtp to the vtp table"); + return ERR_PTR(-ENOEXEC); + } + + return vtp; +} + +static void ubcore_remove_vtp(struct ubcore_device *dev, + enum ubcore_transport_mode mode, + struct ubcore_vtp *vtp) +{ + struct ubcore_hash_table *ht = NULL; + struct ubcore_vtp *find_vtp = NULL; + uint32_t hash; + + switch (mode) { + case UBCORE_TP_RM: + ht = &dev->ht[UBCORE_HT_RM_VTP]; + hash = ubcore_get_vtp_hash(&vtp->cfg.local_eid); + break; + case UBCORE_TP_RC: + ht = &dev->ht[UBCORE_HT_RC_VTP]; + hash = ubcore_get_rc_vtp_hash(&vtp->cfg.peer_eid); + break; + case UBCORE_TP_UM: + ht = &dev->ht[UBCORE_HT_UM_VTP]; + hash = ubcore_get_vtp_hash(&vtp->cfg.local_eid); + break; + 
default: + ubcore_log_err("unknown mode"); + return; + } + spin_lock(&ht->lock); + find_vtp = ubcore_hash_table_lookup_nolock( + ht, hash, ubcore_ht_key(ht, &vtp->hnode)); + if (find_vtp == NULL) { + spin_unlock(&ht->lock); + ubcore_log_warn("vtp:%d no find", vtp->cfg.vtpn); + return; + } + ubcore_hash_table_remove_nolock(ht, &find_vtp->hnode); + /* Pair with kref get in ubcore_find_add_vtp */ + ubcore_vtp_kref_put(find_vtp); + spin_unlock(&ht->lock); +} + +int ubcore_unmap_vtp(struct ubcore_vtp *vtp) +{ + struct ubcore_device *dev = NULL; + struct ubcore_vtp_cfg cfg; + int ret = 0; + + if (vtp == NULL) + return -EINVAL; + + dev = vtp->ub_dev; + if (dev == NULL || dev->ops == NULL || dev->ops->destroy_vtp == NULL) + return -EINVAL; + + cfg = vtp->cfg; + + ubcore_remove_vtp(dev, cfg.trans_mode, vtp); + + return ret; +} + +int ubcore_check_and_unmap_vtp(struct ubcore_vtp *vtp, uint32_t role) +{ + struct ubcore_device *dev = NULL; + struct ubcore_vtp *new_vtp = NULL; + struct ubcore_vtp_cfg cfg; + int ret = 0; + + if (vtp == NULL || vtp->ub_dev == NULL || vtp->ub_dev->ops == NULL || + vtp->ub_dev->ops->destroy_vtp == NULL) + return -EINVAL; + + dev = vtp->ub_dev; + + if (vtp->role != UBCORE_VTP_DUPLEX) + return ubcore_unmap_vtp(vtp); + + cfg = vtp->cfg; + + if (role == UBCORE_VTP_INITIATOR) { + // delete original vtp, create pseudo vtp + new_vtp = kcalloc(1, sizeof(struct ubcore_vtp), GFP_KERNEL); + if (new_vtp == NULL) + return -ENOMEM; + + new_vtp->ub_dev = dev; + new_vtp->role = UBCORE_VTP_TARGET; + new_vtp->eid_idx = vtp->eid_idx; + new_vtp->upi = vtp->upi; + new_vtp->share_mode = vtp->share_mode; + ubcore_vtp_map_attr(new_vtp, &vtp->cfg); + new_vtp->cfg.vtpn = UINT_MAX; + kref_init(&new_vtp->ref_cnt); + + ubcore_remove_vtp(dev, cfg.trans_mode, vtp); + + ret = ubcore_find_add_vtp(dev, new_vtp->cfg.trans_mode, + new_vtp); + ubcore_vtp_kref_put(new_vtp); + if (ret != 0) { + ubcore_log_err( + "Failed to add new vtp to the vtp table"); + return -1; + } + } else { + vtp->role = UBCORE_VTP_INITIATOR; + } + + return ret; +} + +struct ubcore_vtp *ubcore_find_vtp(struct ubcore_device *dev, + enum ubcore_transport_mode mode, + union ubcore_eid *local_eid, + union ubcore_eid *peer_eid) +{ + struct ubcore_vtp *vtp_entry; + + switch (mode) { + case UBCORE_TP_RM: + vtp_entry = ubcore_hash_table_lookup( + &dev->ht[UBCORE_HT_RM_VTP], + ubcore_get_vtp_hash(local_eid), local_eid); + break; + case UBCORE_TP_RC: + vtp_entry = ubcore_hash_table_lookup( + &dev->ht[UBCORE_HT_RC_VTP], + ubcore_get_rc_vtp_hash(peer_eid), peer_eid); + break; + case UBCORE_TP_UM: + vtp_entry = ubcore_hash_table_lookup( + &dev->ht[UBCORE_HT_UM_VTP], + ubcore_get_vtp_hash(local_eid), local_eid); + break; + default: + ubcore_log_err("unknown mode %u", mode); + vtp_entry = NULL; + } + return vtp_entry; +} + +struct ubcore_vtp *ubcore_find_get_vtp(struct ubcore_device *dev, + enum ubcore_transport_mode mode, + union ubcore_eid *local_eid, + union ubcore_eid *peer_eid) +{ + struct ubcore_vtp *vtp_entry; + + switch (mode) { + case UBCORE_TP_RM: + vtp_entry = ubcore_hash_table_lookup_get( + &dev->ht[UBCORE_HT_RM_VTP], + ubcore_get_vtp_hash(local_eid), local_eid); + break; + case UBCORE_TP_RC: + vtp_entry = ubcore_hash_table_lookup_get( + &dev->ht[UBCORE_HT_RC_VTP], + ubcore_get_rc_vtp_hash(peer_eid), peer_eid); + break; + case UBCORE_TP_UM: + vtp_entry = ubcore_hash_table_lookup_get( + &dev->ht[UBCORE_HT_UM_VTP], + ubcore_get_vtp_hash(local_eid), local_eid); + break; + default: + ubcore_log_err("unknown mode"); + vtp_entry = NULL; + } + 
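+	/*
+	 * Note: the lookup key follows the same scheme as ubcore_find_add_vtp()
+	 * and ubcore_remove_vtp(): RM and UM VTPs are hashed by local_eid, while
+	 * RC VTPs are hashed by peer_eid, so a lookup only matches entries that
+	 * were inserted under the same transport mode.
+	 */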
return vtp_entry; +} + +void ubcore_set_vtp_param(struct ubcore_device *dev, struct ubcore_jetty *jetty, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_vtp_param *vtp_param) +{ + if (cfg->eid_index >= dev->eid_table.eid_cnt || + IS_ERR_OR_NULL(dev->eid_table.eid_entries)) { + ubcore_log_err("invalid param, eid_index[%u] >= eid_cnt[%u]", + cfg->eid_index, dev->eid_table.eid_cnt); + return; + } + + vtp_param->trans_mode = cfg->trans_mode; + + if (is_create_rc_shared_tp(cfg->trans_mode, cfg->flag.bs.order_type, + cfg->flag.bs.share_tp)) + vtp_param->trans_mode = UBCORE_TP_RM; + + /* + * RM/UM VTP for userspace app: get local eid from ucontext + * RM/UM VTP for kernel app: how to get local eid ? + * RC VTP: get eid from jetty + */ + vtp_param->local_eid = dev->eid_table.eid_entries[cfg->eid_index].eid; + vtp_param->peer_eid = cfg->id.eid; + if (jetty != NULL) + vtp_param->local_jetty = jetty->jetty_id.id; + else + vtp_param->local_jetty = 0; + + vtp_param->peer_jetty = cfg->id.id; + vtp_param->eid_index = cfg->eid_index; +} + +int ubcore_config_function_migrate_state(struct ubcore_device *dev, + uint16_t ue_idx, uint32_t cnt, + struct ubcore_ueid_cfg *cfg, + enum ubcore_mig_state state) +{ + int ret; + + if (cfg == NULL) { + ret = -EINVAL; + ubcore_log_err("ubcore ueid cfg is null"); + return ret; + } + + if (dev == NULL || dev->ops == NULL || + dev->ops->config_function_migrate_state == NULL) { + ret = -EINVAL; + ubcore_log_err("invalid param"); + return ret; + } + + ret = dev->ops->config_function_migrate_state(dev, ue_idx, cnt, cfg, + state); + if (ret < 0) + ubcore_log_err("Fail to config function migrate state"); + + return ret; +} + +int ubcore_modify_vtp(struct ubcore_device *dev, + struct ubcore_vtp_param *vtp_param, + struct ubcore_vtp_attr *vattr, + union ubcore_vtp_attr_mask *vattr_mask) +{ + struct ubcore_tpg *tmp; + struct ubcore_vtp *vtp; + + int ret = 0; + + if (dev == NULL || dev->ops == NULL || dev->ops->modify_vtp == NULL) { + ret = -EINVAL; + ubcore_log_err("Invalid param.\n"); + return ret; + } + + vtp = ubcore_find_get_vtp(dev, vtp_param->trans_mode, + &vtp_param->local_eid, &vtp_param->peer_eid); + if (vtp == NULL) { + ubcore_log_err("Fail to find vtp when modify vtp.\n"); + return -EINVAL; + } + + if (vtp->role != UBCORE_VTP_TARGET) { // switch to mig dest + ret = dev->ops->modify_vtp(vtp, vattr, vattr_mask); + if (ret != 0) { + ubcore_log_err( + "Fail to modify vtp when call ubcore ops, ret:%d.\n", + ret); + goto put_vtp; + } + } + + if (vtp_param->trans_mode == UBCORE_TP_UM) { + // no vice utp for now, need to add it; + ubcore_utp_kref_put(vtp->cfg.utp); + vtp->cfg.utp = vattr->tp.utp; + } else { + ubcore_tpg_kref_put(vtp->cfg.tpg); + tmp = vtp->cfg.tpg; + vtp->cfg.tpg = vattr->tp.tpg; + vtp->vice_tpg_info.vice_tpg = tmp; + } + + if (vtp->vice_tpg_info.node_state == STATE_READY) + vtp->vice_tpg_info.node_state = STATE_MIGRATING; + else if (vtp->vice_tpg_info.node_state == STATE_MIGRATING) + vtp->vice_tpg_info.node_state = STATE_READY; + else // STATE_NORMAL SHOULD NOT HAPPEN!!! 
+ ubcore_log_warn( + "STATE is normal when modify vtp, vtpn:%u, tpgn:%u, vice tpgn:%u.\n", + vtp->cfg.vtpn, vtp->cfg.tpg->tpgn, + vtp->vice_tpg_info.vice_tpg->tpgn); + + ubcore_log_info("Now STATE is:%u, vtpn:%u, tpgn:%u, vice tpgn:%u.\n", + vtp->vice_tpg_info.node_state, vtp->cfg.vtpn, + vtp->cfg.tpg->tpgn, vtp->vice_tpg_info.vice_tpg->tpgn); + +put_vtp: + ubcore_vtp_kref_put(vtp); + return ret; +} + +struct ubcore_vtp *ubcore_check_and_map_vtp(struct ubcore_device *dev, + struct ubcore_vtp_cfg *cfg, + uint32_t role) +{ + struct ubcore_vice_tpg_info vice_tpg_info = { 0 }; + uint32_t vtp_role = role; + struct ubcore_vtp *vtp; + int ret; + + if (dev->ops == NULL || dev->ops->create_vtp == NULL) + return NULL; + + vice_tpg_info.vice_role = UBCORE_VTP_NO_LOCATION; + vtp = ubcore_find_get_vtp(dev, cfg->trans_mode, &cfg->local_eid, + &cfg->peer_eid); + if (vtp != NULL) { + ubcore_log_info("vtp already exists"); + if (vtp->cfg.vtpn == UINT_MAX) { // only this may happen + vtp_role = (role == vtp->role) ? role : + UBCORE_VTP_DUPLEX; + vice_tpg_info = vtp->vice_tpg_info; + // delete original vtp + ubcore_remove_vtp(dev, cfg->trans_mode, vtp); + ubcore_vtp_kref_put(vtp); + ubcore_log_debug("vtpn is UINT_MAX, delete old one"); + } else { // this may happen during lm + ubcore_log_warn("now should add vice tpg, vtpn:%u.\n", + vtp->cfg.vtpn); + return vtp; + } + } else if (vtp == NULL && cfg->vtpn == UINT_MAX) { + /* vtp is forbidden to be created by driver when vtpn is UINT_MAX */ + ubcore_log_warn("Invalid operation with invalid vtpn.\n"); + return NULL; + } + + vtp = dev->ops->create_vtp(dev, cfg, NULL); + if (IS_ERR_OR_NULL(vtp)) { + ubcore_log_err("Failed to create vtp"); + return NULL; + } + kref_init(&vtp->ref_cnt); + vtp->ub_dev = dev; + vtp->role = vtp_role; + vtp->vice_tpg_info = vice_tpg_info; + ubcore_vtp_map_attr(vtp, cfg); + + ret = ubcore_find_add_vtp(dev, cfg->trans_mode, vtp); + if (ret != 0) { + ubcore_vtp_kref_put(vtp); + ubcore_log_err("Failed to add vtp to the vtp table"); + return NULL; + } + + return vtp; +} + +struct ubcore_vtp *ubcore_check_and_map_target_vtp(struct ubcore_device *dev, + struct ubcore_vtp_cfg *cfg, + uint32_t role) +{ + struct ubcore_vtp *vtp = NULL; + int ret; + + vtp = ubcore_find_get_vtp(dev, cfg->trans_mode, &cfg->local_eid, + &cfg->peer_eid); + if (vtp != NULL) + return vtp; + + vtp = kcalloc(1, sizeof(struct ubcore_vtp), GFP_KERNEL); + if (vtp == NULL) + return NULL; + + vtp->ub_dev = dev; + vtp->role = role; + ubcore_vtp_map_attr(vtp, cfg); + vtp->vice_tpg_info.vice_role = UBCORE_VTP_NO_LOCATION; + vtp->cfg.vtpn = UINT_MAX; + kref_init(&vtp->ref_cnt); + + ret = ubcore_find_add_vtp(dev, cfg->trans_mode, vtp); + if (ret != 0) { + ubcore_vtp_kref_put(vtp); + ubcore_log_err("Failed to add vtp to the vtp table"); + return NULL; + } + return vtp; +} + +uint32_t ubcore_get_all_vtp_cnt(struct ubcore_hash_table *ht, + struct ubcore_device *dev, + uint32_t target_uvs_id) +{ + struct ubcore_vtp *vtp; + uint32_t cnt = 0; + uint32_t i = 0; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return cnt; + } + + for (; i < ht->p.size; i++) { + hlist_for_each_entry(vtp, &ht->head[i], hnode) { + if (ubcore_check_ue2uvs_mapping(dev, vtp->cfg.ue_idx, + target_uvs_id)) + ++cnt; + } + } + + spin_unlock(&ht->lock); + return cnt; +} + +static void ubcore_delay_destroy_vtp(struct work_struct *work) +{ + struct ubcore_vtp_work *vtp_work; + int ret; + + vtp_work = container_of(work, struct ubcore_vtp_work, delay_work.work); + 
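+	/*
+	 * Backoff sketch (derived from the constants above): each retry is
+	 * re-queued with a delay of (1 << retry_times) * DESTROY_VTP_INI_INTERVAL
+	 * (3 s), i.e. 3 s, 6 s, 12 s, ... for at most DESTROY_VTP_MAX_RETRY_TIMES
+	 * (9) attempts, roughly 25 minutes of retrying in total, after which the
+	 * vtpn is freed.
+	 */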
ubcore_del_vtp_work_list(vtp_work); + + // To handle dev unregistering + if (!ubcore_check_dev_is_exist(vtp_work->dev->dev_name)) { + ubcore_log_warn("dev %s not exist\n", vtp_work->dev->dev_name); + (void)ubcore_free_vtpn(vtp_work->vtpn); + goto free_work; + } + + ret = ubcore_send_del_vtp_req(vtp_work->vtpn); + + vtp_work->retry_times++; + ubcore_log_warn("Retry to destroy vtpn:%u, retry_time:%u, ret:%d", + vtp_work->vtpn->vtpn, vtp_work->retry_times, ret); + + if (ret == 0 || ret == -ENOENT || + ubcore_queue_destroy_vtp_task(vtp_work->dev, vtp_work->vtpn, + vtp_work->retry_times) != 0) + (void)ubcore_free_vtpn(vtp_work->vtpn); + +free_work: + ubcore_put_device(vtp_work->dev); + kfree(vtp_work); +} + +int ubcore_queue_destroy_vtp_task(struct ubcore_device *dev, + struct ubcore_vtpn *vtpn, + uint32_t retry_times) +{ + struct ubcore_vtp_work *vtp_work; + uint32_t timeout; + int ret; + + if (retry_times >= DESTROY_VTP_MAX_RETRY_TIMES) + return -EINVAL; + + vtp_work = kzalloc(sizeof(struct ubcore_vtp_work), GFP_KERNEL); + if (vtp_work == NULL) + return -ENOMEM; + + vtp_work->dev = ubcore_find_device_with_name( + dev->dev_name); // to prevent dev unregistering + if (vtp_work->dev == NULL) { + kfree(vtp_work); + return -EINVAL; + } + + vtp_work->vtpn = vtpn; + vtp_work->type = UBCORE_TASK_DESTROY_VTP; + vtp_work->retry_times = retry_times; + INIT_DELAYED_WORK(&vtp_work->delay_work, ubcore_delay_destroy_vtp); + + timeout = (1 << retry_times) * DESTROY_VTP_INI_INTERVAL; + + ubcore_log_warn( + "queue delay work to destroy vtpn:%u, dev:%s, retry_time:%u", + vtp_work->vtpn->vtpn, dev->dev_name, retry_times); + + ret = ubcore_queue_delayed_work(UBCORE_VTP_TASK_WQ, + &vtp_work->delay_work, + msecs_to_jiffies(timeout)); + if (ret != 0) { + ubcore_log_warn("Fail to queue destroy vtp work"); + ubcore_put_device(vtp_work->dev); + kfree(vtp_work); + return ret; + } + + ubcore_add_vtp_work_list(vtp_work); + return 0; +} + +struct ubcore_vtp **ubcore_get_all_vtp(struct ubcore_hash_table *ht, + struct ubcore_device *dev, + uint32_t target_uvs_id, + uint32_t *dev_vtp_cnt) +{ + struct ubcore_vtp **vtp_entry; + struct ubcore_vtp *vtp; + uint32_t i = 0, j = 0; + + *dev_vtp_cnt = ubcore_get_all_vtp_cnt(ht, dev, target_uvs_id); + if (*dev_vtp_cnt == 0) + return NULL; + + vtp_entry = kcalloc( + 1, (*dev_vtp_cnt) * (uint32_t)sizeof(struct ubcore_vtp *), + GFP_KERNEL); + if (vtp_entry == NULL) + return NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + kfree(vtp_entry); + return NULL; + } + + for (; i < ht->p.size; i++) { + hlist_for_each_entry(vtp, &ht->head[i], hnode) { + if (ubcore_check_ue2uvs_mapping(dev, vtp->cfg.ue_idx, + target_uvs_id)) + vtp_entry[j++] = vtp; + } + } + + spin_unlock(&ht->lock); + return vtp_entry; +} diff --git a/drivers/ub/urma/ubcore/ubcore_vtp.h b/drivers/ub/urma/ubcore/ubcore_vtp.h new file mode 100644 index 000000000000..b9b290987aab --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_vtp.h @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + * + * Description: ubcore vtp header + * Author: Yan Fangfang + * Create: 2023-07-14 + * Note: + * History: 2023-07-14: Create file + */ + +#ifndef UBCORE_VTP_H +#define UBCORE_VTP_H + +#include +#include "ubcore_netlink.h" +#include "ubcore_msg.h" +#include "ubcore_netlink.h" +#include "ubcore_tp.h" + +#define UBCORE_VTP_TARGET 1 +#define UBCORE_VTP_INITIATOR 0 +#define UBCORE_VTP_DUPLEX 2 +#define UBCORE_VTP_NO_LOCATION 3 +#define UBCORE_MAX_UDRV_EXT_LEN 128 + +struct ubcore_vtp_param { + enum ubcore_transport_mode trans_mode; + /* vtpn key start */ + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + uint32_t local_jetty; + uint32_t peer_jetty; + /* vtpn key end */ + uint32_t eid_index; + /* for alpha */ + struct ubcore_ta ta; +}; + +struct ubcore_create_vtp_req { + uint32_t vtpn; + enum ubcore_transport_mode trans_mode; + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + uint32_t eid_index; + uint32_t local_jetty; + uint32_t peer_jetty; + char dev_name[UBCORE_MAX_DEV_NAME]; + bool virtualization; + char muedev_name[UBCORE_MAX_DEV_NAME]; + + /* for alpha */ + struct ubcore_ta_data ta_data; + uint32_t udrv_in_len; + uint32_t ext_len; // deprecated keep zero + /* struct ubcore_udrv_priv->in_len + struct ubcore_tp_ext->len */ + uint8_t udrv_ext[UBCORE_MAX_UDRV_EXT_LEN]; + + /* For compatibility, do not change msg structure */ +}; + +struct ubcore_create_vtp_resp { + int ret; + uint32_t vtpn; +}; + +struct ubcore_destroy_vtp_resp { + int ret; +}; + +struct ubcore_vtp_status_notify { + char mue_name[UBCORE_MAX_DEV_NAME]; + uint32_t vtpn; + enum ubcore_transport_mode trans_mode; + enum ubcore_vtp_state status; + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + uint32_t local_jetty_id; // only for RC + uint32_t peer_jetty_id; // only for RC +}; + +/* map vtpn to tpg, tp, utp or ctp */ +struct ubcore_cmd_vtp_cfg { + uint16_t ue_idx; + uint32_t vtpn; + uint32_t local_jetty; + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + uint32_t peer_jetty; + union ubcore_vtp_cfg_flag flag; + enum ubcore_transport_mode trans_mode; + union { + uint32_t tpgn; + uint32_t tpn; + uint32_t utpn; + uint32_t ctpn; + uint32_t value; + }; +}; + +struct ubcore_migrate_vtp_req { + struct ubcore_cmd_vtp_cfg vtp_cfg; + char dev_name[UBCORE_MAX_DEV_NAME]; + enum ubcore_event_type event_type; +}; + +struct ubcore_wait_vtpn_resp_work { + struct delayed_work delay_work; + struct ubcore_device *dev; + struct ubcore_msg_session *s; + struct ubcore_vtp_param param; + int timeout; + struct ubcore_vtpn *vtpn; + uint32_t msg_id; +}; + +struct ubcore_vtpn_cb_para { + enum ubcore_tjetty_type type; + struct ubcore_tjetty *tjetty; + struct ubcore_jetty *jetty; + struct ubcore_import_cb *import_cb; + struct ubcore_bind_cb *bind_cb; + struct ubcore_unimport_cb *unimport_cb; + struct ubcore_unbind_cb *unbind_cb; +}; + +struct ubcore_vtpn_wait_cb_node { + struct list_head node; + struct ubcore_vtpn_cb_para para; + struct ubcore_wait_vtpn_resp_work *wait_work; +}; + +struct ubcore_vtpn *ubcore_find_get_vtpn_ctrlplane(struct ubcore_device *dev, + struct ubcore_active_tp_cfg *active_tp_cfg); +void ubcore_vtpn_kref_put(struct ubcore_vtpn *vtpn); +void ubcore_add_async_wait_list(struct ubcore_vtpn *vtpn, + struct ubcore_vtpn_cb_para *para, struct ubcore_wait_vtpn_resp_work *wait_work); +void ubcore_del_async_wait_list(struct ubcore_vtpn *vtpn); +struct ubcore_vtpn *ubcore_connect_vtp(struct ubcore_device *dev, + struct ubcore_vtp_param *param); 
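+/*
+ * Illustrative usage sketch (hypothetical caller, not part of this patch;
+ * dev, jetty and tjetty_cfg are assumed to be set up elsewhere):
+ *
+ *	struct ubcore_vtp_param param = { 0 };
+ *	struct ubcore_vtpn *vtpn;
+ *
+ *	ubcore_set_vtp_param(dev, jetty, &tjetty_cfg, &param);
+ *	vtpn = ubcore_connect_vtp(dev, &param);
+ *	if (IS_ERR_OR_NULL(vtpn))
+ *		return vtpn == NULL ? -EIO : PTR_ERR(vtpn);
+ *	...
+ *	(void)ubcore_disconnect_vtp(vtpn);
+ *
+ * ubcore_connect_vtp() returns an ERR_PTR() on invalid arguments or driver
+ * failure and NULL on other failures; on success each connect holds one
+ * use_cnt reference that is expected to be released by a matching
+ * ubcore_disconnect_vtp(), which tears the vtp down once use_cnt drops to
+ * zero.
+ */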
+struct ubcore_vtpn *ubcore_connect_vtp_ctrlplane(struct ubcore_device *dev, + struct ubcore_vtp_param *param, struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata); +struct ubcore_vtpn *ubcore_connect_vtp_async(struct ubcore_device *dev, + struct ubcore_vtp_param *param, int timeout, struct ubcore_vtpn_cb_para *para); +int ubcore_disconnect_vtp(struct ubcore_vtpn *vtpn); +int ubcore_disconnect_vtp_async(struct ubcore_vtpn *vtpn, int timeout, + struct ubcore_vtpn_cb_para *para); +int ubcore_queue_destroy_vtp_task(struct ubcore_device *dev, struct ubcore_vtpn *vtpn, + uint32_t retry_times); +void ubcore_flush_dev_vtp_work(struct ubcore_device *dev); +void ubcore_wait_connect_vtp_resp_intime(struct ubcore_msg_session *s, + struct ubcore_device *dev, struct ubcore_resp *resp); +void ubcore_wait_disconnect_vtp_resp_intime(struct ubcore_msg_session *s, + struct ubcore_device *dev, struct ubcore_resp *resp); +/* map vtp to tpg, utp .... */ +struct ubcore_vtp *ubcore_create_and_map_vtp(struct ubcore_device *dev, struct ubcore_vtp_cfg *cfg); +struct ubcore_vtp *ubcore_check_and_map_vtp(struct ubcore_device *dev, struct ubcore_vtp_cfg *cfg, + uint32_t role); +struct ubcore_vtp *ubcore_check_and_map_target_vtp(struct ubcore_device *dev, + struct ubcore_vtp_cfg *cfg, uint32_t role); +int ubcore_unmap_vtp(struct ubcore_vtp *vtp); +int ubcore_check_and_unmap_vtp(struct ubcore_vtp *vtp, uint32_t role); +/* find mapped vtp */ +struct ubcore_vtp *ubcore_find_vtp(struct ubcore_device *dev, enum ubcore_transport_mode mode, + union ubcore_eid *local_eid, union ubcore_eid *peer_eid); +struct ubcore_vtp *ubcore_find_get_vtp(struct ubcore_device *dev, + enum ubcore_transport_mode mode, union ubcore_eid *local_eid, union ubcore_eid *peer_eid); + +void ubcore_set_vtp_param(struct ubcore_device *dev, struct ubcore_jetty *jetty, + struct ubcore_tjetty_cfg *cfg, struct ubcore_vtp_param *vtp_param); +/* config function migrate state */ +int ubcore_config_function_migrate_state(struct ubcore_device *dev, uint16_t ue_idx, + uint32_t cnt, struct ubcore_ueid_cfg *cfg, enum ubcore_mig_state state); +int ubcore_modify_vtp(struct ubcore_device *dev, struct ubcore_vtp_param *vtp_param, + struct ubcore_vtp_attr *vattr, union ubcore_vtp_attr_mask *vattr_mask); + +uint32_t ubcore_get_all_vtp_cnt(struct ubcore_hash_table *ht, struct ubcore_device *dev, + uint32_t target_uvs_id); +/* returned list should be freed by caller */ +struct ubcore_vtp **ubcore_get_all_vtp(struct ubcore_hash_table *ht, struct ubcore_device *dev, + uint32_t target_uvs_id, uint32_t *dev_vtp_cnt); + +int ubcore_process_vtp_status_nofity(struct ubcore_device *dev, + struct ubcore_vtp_status_notify *msg); + +void ubcore_vtp_get(void *obj); +void ubcore_vtpn_get(void *obj); +void ubcore_vtp_kref_put(struct ubcore_vtp *vtp); +#endif diff --git a/drivers/ub/urma/ubcore/ubcore_workqueue.c b/drivers/ub/urma/ubcore/ubcore_workqueue.c new file mode 100644 index 000000000000..39ae70960cb9 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_workqueue.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: Ubcore workqueue + * Author:Zhangjiayu + * Create: 2024-1-27 + * Note: + * History: 2024-1-27: Create file + */ + +#include "ubcore_log.h" +#include "ubcore_workqueue.h" + +static struct workqueue_struct *g_ubcore_workqueues[(int)UBCORE_QUEUE_TYPE_NUM]; +const char UBCORE_QUEUE_NAMES[(int)UBCORE_QUEUE_TYPE_NUM] + [UBCORE_QUEUE_NAME_LEN] = { + { "ubcore-device-wq" }, + { "ubcore-sip-wq" }, + { "ubcore-bond-wq" }, + { "ubcore-fb-wq" }, + { "ubcore-nl-wq" }, + { "ubcore-vtp-task-wq" }, + { "ubcore-connect-vtp-async-wq" }, + { "ubcore-disconnect-vtp-async-wq" } + }; + +static bool check_queue_type_valid(int queue_type) +{ + return (queue_type >= (int)UBCORE_DISPATCH_EVENT_WQ) && + (queue_type < (int)UBCORE_QUEUE_TYPE_NUM); +} + +void ubcore_flush_workqueue(int queue_type) +{ + if (!check_queue_type_valid(queue_type) || + g_ubcore_workqueues[queue_type] == NULL) { + ubcore_log_err( + "queue_type %d out of range or workqueue is NULL\n", + queue_type); + return; + } + + flush_workqueue(g_ubcore_workqueues[queue_type]); +} + +int ubcore_queue_work(int queue_type, struct work_struct *work) +{ + if (!check_queue_type_valid(queue_type) || + g_ubcore_workqueues[queue_type] == NULL) { + ubcore_log_err( + "queue_type %d out of range or workqueue is NULL\n", + queue_type); + return -EINVAL; + } + return queue_work(g_ubcore_workqueues[queue_type], work) ? 0 : -1; +} + +int ubcore_queue_delayed_work(int queue_type, struct delayed_work *work, + unsigned long timeout) +{ + if (!check_queue_type_valid(queue_type) || + g_ubcore_workqueues[queue_type] == NULL) { + ubcore_log_err( + "queue_type %d out of range or workqueue is NULL\n", + queue_type); + return -EINVAL; + } + return queue_delayed_work(g_ubcore_workqueues[queue_type], work, + timeout) ? + 0 : + -1; +} + +bool ubcore_modify_delay_work(int queue_type, struct delayed_work *work, + unsigned long timeout) +{ + if (!check_queue_type_valid(queue_type) || + g_ubcore_workqueues[queue_type] == NULL) { + ubcore_log_err( + "queue_type %d out of range or workqueue is NULL\n", + queue_type); + return -EINVAL; + } + return mod_delayed_work(g_ubcore_workqueues[queue_type], work, timeout); +} + +int ubcore_create_workqueues(void) +{ + uint32_t i, j; + + for (i = 0; i < UBCORE_QUEUE_TYPE_NUM; i++) { + if (i == UBCORE_NLMSG_WQ) + g_ubcore_workqueues[i] = alloc_workqueue( + "%s", 0, 1, UBCORE_QUEUE_NAMES[i]); + else + g_ubcore_workqueues[i] = alloc_workqueue( + "%s", 0, 0, UBCORE_QUEUE_NAMES[i]); + + if (g_ubcore_workqueues[i] == NULL) { + ubcore_log_err( + "Fail to alloc workqueue, queue type %u.\n", i); + break; + } + } + + if (i == UBCORE_QUEUE_TYPE_NUM) + return 0; + for (j = 0; j < i; j++) + destroy_workqueue(g_ubcore_workqueues[j]); + + return -1; +} + +void ubcore_destroy_workqueues(void) +{ + uint32_t i; + + for (i = 0; i < UBCORE_QUEUE_TYPE_NUM; i++) { + drain_workqueue(g_ubcore_workqueues[i]); + destroy_workqueue(g_ubcore_workqueues[i]); + } +} diff --git a/drivers/ub/urma/ubcore/ubcore_workqueue.h b/drivers/ub/urma/ubcore/ubcore_workqueue.h new file mode 100644 index 000000000000..654358eebdf5 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_workqueue.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: Ubcore workqueue + * Author: Zhang Jiayu + * Create: 2024-1-27 + * Note: + * History: 2024-1-27: Create file + */ + +#ifndef UBCORE_WORKQUEUE_H +#define UBCORE_WORKQUEUE_H + +#include +#include + +#include + +#define UBCORE_QUEUE_NAME_LEN 32 + +enum ubcore_queue_type { + UBCORE_DISPATCH_EVENT_WQ = 0, + UBCORE_SIP_NOTIFY_WQ, + UBCORE_BOND_EVENT_WQ, + UBCORE_FRONT_BACK_WQ, /* For frontend and backend ubcore communication. */ + UBCORE_NLMSG_WQ, /* For send nl msg from ubcore to uvs */ + UBCORE_VTP_TASK_WQ, + UBCORE_CONNECT_VTP_ASYNC_WQ, + UBCORE_DISCONNECT_VTP_ASYNC_WQ, + UBCORE_QUEUE_TYPE_NUM +}; + +struct ubcore_front_back_work { + struct work_struct work; + struct ubcore_device *dev; + struct ubcore_req_host *req; +}; + +void ubcore_flush_workqueue(int queue_type); + +int ubcore_queue_work(int queue_type, struct work_struct *work); + +int ubcore_queue_delayed_work(int queue_type, struct delayed_work *work, + unsigned long timeout); + +bool ubcore_modify_delay_work(int queue_type, struct delayed_work *work, + unsigned long timeout); + +int ubcore_create_workqueues(void); + +void ubcore_destroy_workqueues(void); + +#endif diff --git a/drivers/ub/urma/uburma/Makefile b/drivers/ub/urma/uburma/Makefile new file mode 100644 index 000000000000..eef98adf3714 --- /dev/null +++ b/drivers/ub/urma/uburma/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux kernel UB device drivers. +# + +uburma-objs := uburma_log.o \ + uburma_mmap.o \ + uburma_cmd_tlv.o \ + uburma_event.o \ + uburma_uobj.o \ + uburma_cmd.o \ + uburma_dev_ops.o \ + uburma_main.o + +obj-$(CONFIG_UB_URMA) += uburma.o diff --git a/drivers/ub/urma/uburma/config/uburma.conf b/drivers/ub/urma/uburma/config/uburma.conf new file mode 100644 index 000000000000..57f35517e5a7 --- /dev/null +++ b/drivers/ub/urma/uburma/config/uburma.conf @@ -0,0 +1 @@ +uburma \ No newline at end of file diff --git a/drivers/ub/urma/uburma/uburma_cmd.c b/drivers/ub/urma/uburma/uburma_cmd.c new file mode 100644 index 000000000000..286997c8fca8 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_cmd.c @@ -0,0 +1,3435 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: uburma cmd implementation + * Author: Qian Guoxin + * Create: 2021-08-04 + * Note: + * History: 2021-08-04: Create file + * History: 2022-07-25: Yan Fangfang Change the prefix uburma_ioctl_ to uburma_cmd_ + */ + +#include +#include + +#include +#include +#include "uburma_log.h" +#include "uburma_types.h" +#include "uburma_event.h" +#include "uburma_file_ops.h" +#include "uburma_uobj.h" +#include "uburma_cmd_tlv.h" + +#include "uburma_cmd.h" + +#define UBURMA_INVALID_TPN UINT_MAX +#define UBURMA_CREATE_JETTY_ARG_IN_RC_SHARE_TP_SHIFT 11 + +void uburma_cmd_inc(struct uburma_device *ubu_dev) +{ + atomic_inc(&ubu_dev->cmdcnt); +} + +void uburma_cmd_dec(struct uburma_device *ubu_dev) +{ + if (atomic_dec_and_test(&ubu_dev->cmdcnt)) + complete(&ubu_dev->cmddone); +} + +void uburma_cmd_flush(struct uburma_device *ubu_dev) +{ + uburma_cmd_dec(ubu_dev); + wait_for_completion(&ubu_dev->cmddone); +} + +static inline void fill_udata(struct ubcore_udata *out, + struct ubcore_ucontext *ctx, + struct uburma_cmd_udrv_priv *udata) +{ + out->uctx = ctx; + out->udrv_data = (struct ubcore_udrv_priv *)(void *)udata; +} + +static int uburma_cmd_create_ctx(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct ubcore_ucontext *ucontext; + struct uburma_cmd_create_ctx arg; + struct uburma_uobj *uobj; + struct uburma_jfae_uobj *jfae; + union ubcore_eid eid; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + down_write(&file->ucontext_rwsem); + + if (file->ucontext != NULL) { + up_write(&file->ucontext_rwsem); + uburma_log_err( + "ucontext eixt, should not create ctx in same fd.\n"); + return -EEXIST; + } + + (void)memcpy(eid.raw, arg.in.eid, UBCORE_EID_SIZE); + ucontext = ubcore_alloc_ucontext( + ubc_dev, arg.in.eid_index, + (struct ubcore_udrv_priv *)(void *)&arg.udata); + if (IS_ERR_OR_NULL(ucontext)) { + up_write(&file->ucontext_rwsem); + return PTR_ERR(ucontext); + } + ucontext->eid = eid; + uobj = uobj_alloc(UOBJ_CLASS_JFAE, file); + if (IS_ERR_OR_NULL(uobj)) { + ret = PTR_ERR(uobj); + goto free_ctx; + } + + jfae = container_of(uobj, struct uburma_jfae_uobj, uobj); + uburma_init_jfae(jfae, ubc_dev); + ucontext->jfae = uobj; + arg.out.async_fd = uobj->id; + file->ucontext = ucontext; + + ret = uburma_tlv_append(hdr, (void *)&arg); + if (ret != 0) + goto free_jfae; + + uobj_alloc_commit(uobj); + up_write(&file->ucontext_rwsem); + uburma_log_debug("uburma create context success.\n"); + return ret; + +free_jfae: + uobj_alloc_abort(uobj); +free_ctx: + ubcore_free_ucontext(ubc_dev, ucontext); + file->ucontext = NULL; + up_write(&file->ucontext_rwsem); + return ret; +} + +static void uburma_fill_attr(struct ubcore_seg_cfg *cfg, + struct uburma_cmd_register_seg *arg) +{ + cfg->va = arg->in.va; + cfg->len = arg->in.len; + cfg->flag.value = arg->in.flag; + cfg->token_value.token = arg->in.token; + cfg->iova = arg->in.va; +} + +static int uburma_cmd_alloc_token_id(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_alloc_token_id arg; + union ubcore_token_id_flag flag = { 0 }; + struct ubcore_udata udata = { 0 }; + struct ubcore_token_id *token_id; + struct uburma_uobj *uobj; + + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + flag.bs.multi_seg = arg.flag.bs.multi_seg; + fill_udata(&udata, file->ucontext, &arg.udata); + uobj = uobj_alloc(UOBJ_CLASS_TOKEN, file); + if (IS_ERR_OR_NULL(uobj)) { + 
uburma_log_err("UOBJ_CLASS_TOKEN alloc fail!\n"); + return -ENOMEM; + } + + token_id = ubcore_alloc_token_id(ubc_dev, flag, &udata); + if (IS_ERR_OR_NULL(token_id)) { + uburma_log_err("ubcore alloc token_id id failed.\n"); + ret = PTR_ERR(token_id); + goto err_free_uobj; + } + uobj->object = token_id; + arg.out.token_id = token_id->token_id; + arg.out.handle = (uint64_t)uobj->id; + + ret = uburma_tlv_append(hdr, (void *)&arg); + if (ret != 0) + goto err_free_token_id; + + uobj_alloc_commit(uobj); + return 0; + +err_free_token_id: + (void)ubcore_free_token_id(token_id); +err_free_uobj: + uobj_alloc_abort(uobj); + return ret; +} + +static int uburma_cmd_free_token_id(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_free_token_id arg; + struct ubcore_token_id *token; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_TOKEN, (int)arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find token id.\n"); + return -EINVAL; + } + + token = (struct ubcore_token_id *)uobj->object; + if (arg.in.token_id != token->token_id) { + uburma_log_err( + "ubcore remove token_id failed: non-consistent.\n"); + return -EPERM; + } + ret = uobj_remove_commit(uobj); + if (ret != 0) + uburma_log_err("ubcore remove commit token_id failed.\n"); + + uobj_put_del(uobj); + return ret; +} + +static int uburma_cmd_register_seg(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_register_seg arg; + struct ubcore_seg_cfg cfg = { 0 }; + struct ubcore_target_seg *seg; + struct ubcore_udata udata = { 0 }; + struct uburma_uobj *uobj; + struct uburma_uobj *token_id_uobj; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + token_id_uobj = uobj_get_read(UOBJ_CLASS_TOKEN, + (int)arg.in.token_id_handle, file); + if (!IS_ERR_OR_NULL(token_id_uobj)) + cfg.token_id = (struct ubcore_token_id *)token_id_uobj->object; + + uburma_fill_attr(&cfg, &arg); + cfg.eid_index = file->ucontext->eid_index; + fill_udata(&udata, file->ucontext, &arg.udata); + + uobj = uobj_alloc(UOBJ_CLASS_SEG, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("UOBJ_CLASS_SEG alloc fail!\n"); + ret = -ENOMEM; + goto err_put_token_id; + } + + seg = ubcore_register_seg(ubc_dev, &cfg, &udata); + if (IS_ERR_OR_NULL(seg)) { + uburma_log_err_rl("ubcore_register_seg failed.\n"); + ret = PTR_ERR(seg); + goto err_free_uobj; + } + uobj->object = seg; + arg.out.token_id = seg->seg.token_id; + arg.out.handle = (uint64_t)uobj->id; + + ret = uburma_tlv_append(hdr, (void *)&arg); + if (ret != 0) + goto err_delete_seg; + + if (!IS_ERR_OR_NULL(token_id_uobj)) + uobj_put_read(token_id_uobj); + uobj_alloc_commit(uobj); + return 0; + +err_delete_seg: + ubcore_unregister_seg(seg); +err_free_uobj: + uobj_alloc_abort(uobj); +err_put_token_id: + if (!IS_ERR_OR_NULL(token_id_uobj)) + uobj_put_read(token_id_uobj); + return ret; +} + +static int uburma_cmd_unregister_seg(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unregister_seg arg; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_SEG, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find registered seg.\n"); + return -EINVAL; + } + ret = 
uobj_remove_commit(uobj); + if (ret != 0) + uburma_log_err("ubcore_unregister_seg failed.\n"); + + uobj_put_del(uobj); + return ret; +} + +static void uburma_write_async_event(struct ubcore_ucontext *ctx, + uint64_t event_data, uint32_t event_type, + struct list_head *obj_event_list, + uint32_t *counter) +{ + struct uburma_jfae_uobj *jfae; + + rcu_read_lock(); + jfae = rcu_dereference(ctx->jfae); + if (jfae == NULL) { + rcu_read_unlock(); + return; + } + uburma_write_event(&jfae->jfe, event_data, event_type, obj_event_list, + counter); + rcu_read_unlock(); +} + +void uburma_jfc_event_cb(struct ubcore_event *event, + struct ubcore_ucontext *ctx) +{ + struct uburma_jfc_uobj *jfc_uobj; + + if (event->element.jfc == NULL) + return; + + jfc_uobj = (struct uburma_jfc_uobj *) + event->element.jfc->jfc_cfg.jfc_context; + uburma_write_async_event(ctx, event->element.jfc->urma_jfc, + event->event_type, &jfc_uobj->async_event_list, + &jfc_uobj->async_events_reported); +} + +void uburma_jfs_event_cb(struct ubcore_event *event, + struct ubcore_ucontext *ctx) +{ + struct uburma_jfs_uobj *jfs_uobj; + + if (event->element.jfs == NULL) + return; + + jfs_uobj = (struct uburma_jfs_uobj *) + event->element.jfs->jfs_cfg.jfs_context; + uburma_write_async_event(ctx, event->element.jfs->urma_jfs, + event->event_type, &jfs_uobj->async_event_list, + &jfs_uobj->async_events_reported); +} + +void uburma_jfr_event_cb(struct ubcore_event *event, + struct ubcore_ucontext *ctx) +{ + struct uburma_jfr_uobj *jfr_uobj; + + if (event->element.jfr == NULL) + return; + + jfr_uobj = (struct uburma_jfr_uobj *) + event->element.jfr->jfr_cfg.jfr_context; + uburma_write_async_event(ctx, event->element.jfr->urma_jfr, + event->event_type, &jfr_uobj->async_event_list, + &jfr_uobj->async_events_reported); +} + +void uburma_jetty_event_cb(struct ubcore_event *event, + struct ubcore_ucontext *ctx) +{ + struct uburma_jetty_uobj *jetty_uobj; + + if (event->element.jetty == NULL) + return; + + jetty_uobj = (struct uburma_jetty_uobj *) + event->element.jetty->jetty_cfg.jetty_context; + uburma_write_async_event(ctx, event->element.jetty->urma_jetty, + event->event_type, + &jetty_uobj->async_event_list, + &jetty_uobj->async_events_reported); +} + +void uburma_jetty_grp_event_cb(struct ubcore_event *event, + struct ubcore_ucontext *ctx) +{ + struct uburma_jetty_grp_uobj *jetty_grp_uobj; + + if (event->element.jetty_grp == NULL) + return; + + jetty_grp_uobj = + (struct uburma_jetty_grp_uobj *) + event->element.jetty_grp->jetty_grp_cfg.user_ctx; + uburma_write_async_event(ctx, event->element.jetty_grp->urma_jetty_grp, + event->event_type, + &jetty_grp_uobj->async_event_list, + &jetty_grp_uobj->async_events_reported); +} + +static int uburma_cmd_create_jfs(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_jfs arg; + struct ubcore_jfs_cfg cfg = { 0 }; + struct ubcore_udata udata; + struct uburma_jfs_uobj *jfs_uobj; + struct uburma_uobj *jfc_uobj; + struct ubcore_jfs *jfs; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + cfg.depth = arg.in.depth; + cfg.flag.value = arg.in.flag; + cfg.eid_index = file->ucontext->eid_index; + cfg.trans_mode = arg.in.trans_mode; + cfg.max_sge = arg.in.max_sge; + cfg.max_rsge = arg.in.max_rsge; + cfg.max_inline_data = arg.in.max_inline_data; + cfg.rnr_retry = arg.in.rnr_retry; + cfg.err_timeout = arg.in.err_timeout; + cfg.priority = arg.in.priority; + + jfs_uobj = (struct uburma_jfs_uobj 
*)uobj_alloc(UOBJ_CLASS_JFS, file); + if (IS_ERR_OR_NULL(jfs_uobj)) { + uburma_log_err("UOBJ_CLASS_JFS alloc fail!\n"); + return -ENOMEM; + } + jfs_uobj->async_events_reported = 0; + INIT_LIST_HEAD(&jfs_uobj->async_event_list); + cfg.jfs_context = jfs_uobj; + + jfc_uobj = uobj_get_read(UOBJ_CLASS_JFC, arg.in.jfc_handle, file); + if (IS_ERR_OR_NULL(jfc_uobj)) { + uburma_log_err("failed to find jfc, jfc_handle:%llu.\n", + arg.in.jfc_handle); + ret = -EINVAL; + goto err_alloc_abort; + } + cfg.jfc = jfc_uobj->object; + fill_udata(&udata, file->ucontext, &arg.udata); + + jfs = ubcore_create_jfs(ubc_dev, &cfg, uburma_jfs_event_cb, &udata); + if (IS_ERR_OR_NULL(jfs)) { + uburma_log_err("create jfs or get jfs_id failed.\n"); + ret = PTR_ERR(jfs); + goto err_put_jfc; + } + jfs_uobj->uobj.object = jfs; + jfs->urma_jfs = arg.in.urma_jfs; + + /* Do not release jfae fd until jfs is destroyed */ + ret = uburma_get_jfae(file); + if (ret != 0) + goto err_delete_jfs; + + arg.out.id = jfs->jfs_id.id; + arg.out.depth = jfs->jfs_cfg.depth; + arg.out.max_sge = jfs->jfs_cfg.max_sge; + arg.out.max_rsge = jfs->jfs_cfg.max_rsge; + arg.out.max_inline_data = jfs->jfs_cfg.max_inline_data; + arg.out.handle = jfs_uobj->uobj.id; + + ret = uburma_tlv_append(hdr, (void *)&arg); + if (ret != 0) + goto err_put_jfae; + + uobj_put_read(jfc_uobj); + uobj_alloc_commit(&jfs_uobj->uobj); + return 0; + +err_put_jfae: + uburma_put_jfae(file); +err_delete_jfs: + ubcore_delete_jfs(jfs); +err_put_jfc: + uobj_put_read(jfc_uobj); +err_alloc_abort: + uobj_alloc_abort(&jfs_uobj->uobj); + return ret; +} + +static int uburma_cmd_modify_jfs(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_modify_jfs arg; + struct ubcore_jfs_attr attr = { 0 }; + struct uburma_uobj *uobj; + struct ubcore_udata udata; + struct ubcore_jfs *jfs; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + attr.mask = arg.in.mask; + attr.state = arg.in.state; + fill_udata(&udata, file->ucontext, &arg.udata); + + uobj = uobj_get_write(UOBJ_CLASS_JFS, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find jfs.\n"); + return -EINVAL; + } + + jfs = (struct ubcore_jfs *)uobj->object; + ret = ubcore_modify_jfs(jfs, &attr, &udata); + if (ret != 0) { + uobj_put_write(uobj); + uburma_log_err("modify jfs failed, ret:%d.\n", ret); + return ret; + } + + ret = uburma_tlv_append(hdr, (void *)&arg); + uobj_put_write(uobj); + return ret; +} + +static int uburma_cmd_query_jfs(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_query_jfs arg; + struct ubcore_jfs_attr attr = { 0 }; + struct ubcore_jfs_cfg cfg = { 0 }; + struct uburma_uobj *uobj; + struct ubcore_jfs *jfs; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + uobj = uobj_get_read(UOBJ_CLASS_JFS, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find jfs.\n"); + return -EINVAL; + } + + jfs = (struct ubcore_jfs *)uobj->object; + ret = ubcore_query_jfs(jfs, &cfg, &attr); + if (ret != 0) { + uobj_put_read(uobj); + uburma_log_err("query jfs failed, ret:%d.\n", ret); + return ret; + } + + arg.out.depth = cfg.depth; + arg.out.flag = cfg.flag.value; + arg.out.trans_mode = (uint32_t)cfg.trans_mode; + arg.out.priority = cfg.priority; + arg.out.max_sge = cfg.max_sge; + arg.out.max_rsge = cfg.max_rsge; + arg.out.max_inline_data = cfg.max_inline_data; + arg.out.rnr_retry = 
cfg.rnr_retry; + arg.out.err_timeout = cfg.err_timeout; + arg.out.state = (uint32_t)attr.state; + + ret = uburma_tlv_append(hdr, (void *)&arg); + uobj_put_read(uobj); + return ret; +} + +static int uburma_cmd_delete_jfs(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_delete_jfs arg; + struct uburma_jfs_uobj *jfs_uobj; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_JFS, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find jfs"); + return -EINVAL; + } + + /* To get async_events_reported after obj removed. */ + uobj_get(uobj); + jfs_uobj = container_of(uobj, struct uburma_jfs_uobj, uobj); + + ret = uobj_remove_commit(uobj); + if (ret != 0) { + uburma_log_err("delete jfs failed, ret:%d.\n", ret); + uobj_put(uobj); + uobj_put_del(uobj); + return ret; + } + + arg.out.async_events_reported = jfs_uobj->async_events_reported; + uobj_put(uobj); + uobj_put_del(uobj); + return uburma_tlv_append(hdr, (void *)&arg); +} + +static int uburma_cmd_delete_jfs_batch(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_jfs_uobj *jfs_uobj = NULL; + struct uburma_cmd_delete_jfs_batch arg; + struct uburma_uobj **uobj_arr = NULL; + uint32_t async_events_reported = 0; + struct uburma_uobj *uobj = NULL; + uint32_t bad_jfs_index = 0; + uint64_t *jfs_arr = NULL; + uint32_t arr_num; + int ret_bad; + int ret; + int i; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + arr_num = arg.in.jfs_num; + jfs_arr = kcalloc(arr_num, sizeof(uint64_t), GFP_KERNEL); + if (jfs_arr == NULL) + return -ENOMEM; + + ret = uburma_copy_from_user((void *)jfs_arr, + (void __user *)arg.in.jfs_ptr, + arr_num * sizeof(uint64_t)); + if (ret != 0) + goto free_jfs_arr; + + uobj_arr = kcalloc(arr_num, sizeof(struct uburma_uobj *), GFP_KERNEL); + if (uobj_arr == NULL) { + ret = -ENOMEM; + goto free_jfs_arr; + } + + for (i = 0; i < arr_num; ++i) { + uobj = uobj_get_del(UOBJ_CLASS_JFS, jfs_arr[i], file); + uobj_arr[i] = uobj; + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jfs, index is %d.\n", i); + ret = -EINVAL; + goto free_uobj_arr; + } + /* To get events_reported after obj removed. 
*/ + uobj_get(uobj); + jfs_uobj = container_of(uobj, struct uburma_jfs_uobj, uobj); + async_events_reported += jfs_uobj->async_events_reported; + } + + ret = uobj_remove_commit_batch(uobj_arr, arr_num, &bad_jfs_index); + if (ret != 0) + uburma_log_err("delete jfs failed, ret:%d.\n", ret); + + arg.out.async_events_reported = async_events_reported; + arg.out.bad_jfs_index = bad_jfs_index; + + uobj_put_batch(uobj_arr, arr_num); + uobj_put_del_batch(uobj_arr, arr_num); + + ret_bad = uburma_tlv_append(hdr, (void *)&arg); + if (ret_bad != 0) { + uburma_log_err("uburma tlv append error, ret:%d.\n", ret_bad); + ret = ret_bad; + } + +free_uobj_arr: + kfree(uobj_arr); +free_jfs_arr: + kfree(jfs_arr); + return ret; +} + +static int uburma_cmd_import_seg(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_import_seg arg; + struct ubcore_target_seg_cfg cfg = { 0 }; + struct ubcore_udata udata; + struct ubcore_target_seg *tseg; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + uobj = uobj_alloc(UOBJ_CLASS_TARGET_SEG, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("UOBJ_CLASS_TARGET_JFR alloc fail!\n"); + return -ENOMEM; + } + + (void)memcpy(cfg.seg.ubva.eid.raw, arg.in.eid, UBCORE_EID_SIZE); + cfg.seg.ubva.va = arg.in.va; + cfg.seg.len = arg.in.len; + cfg.seg.attr.value = arg.in.flag; + cfg.seg.token_id = arg.in.token_id; + fill_udata(&udata, file->ucontext, &arg.udata); + + tseg = ubcore_import_seg(ubc_dev, &cfg, &udata); + if (IS_ERR_OR_NULL(tseg)) { + uburma_log_err("import seg failed.\n"); + uobj_alloc_abort(uobj); + return PTR_ERR(tseg); + } + + uobj->object = tseg; + arg.out.handle = (uint64_t)uobj->id; + + ret = uburma_tlv_append(hdr, (void *)&arg); + if (ret != 0) { + (void)ubcore_unimport_seg(tseg); + uobj_alloc_abort(uobj); + return ret; + } + uobj_alloc_commit(uobj); + return ret; +} + +static int uburma_cmd_unimport_seg(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unimport_seg arg; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_TARGET_SEG, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find imported target seg.\n"); + return -EINVAL; + } + ret = uobj_remove_commit(uobj); + if (ret != 0) + uburma_log_err("unimport seg failed.\n"); + + uobj_put_del(uobj); + return ret; +} + +static int uburma_cmd_create_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_jfr arg; + struct uburma_uobj *jfc_uobj; + struct uburma_jfr_uobj *jfr_uobj; + struct ubcore_jfr_cfg cfg = { 0 }; + struct ubcore_udata udata; + struct ubcore_jfr *jfr; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + cfg.id = arg.in.id; + cfg.depth = arg.in.depth; + cfg.eid_index = file->ucontext->eid_index; + cfg.flag.value = arg.in.flag; + cfg.max_sge = arg.in.max_sge; + cfg.min_rnr_timer = arg.in.min_rnr_timer; + cfg.trans_mode = arg.in.trans_mode; + cfg.token_value.token = arg.in.token; + fill_udata(&udata, file->ucontext, &arg.udata); + + jfr_uobj = (struct uburma_jfr_uobj *)uobj_alloc(UOBJ_CLASS_JFR, file); + if (IS_ERR_OR_NULL(jfr_uobj)) { + uburma_log_err("UOBJ_CLASS_JFR alloc fail!\n"); + return -ENOMEM; + } + jfr_uobj->async_events_reported = 0; + 
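/* Stash the uobj as cfg.jfr_context below so uburma_jfr_event_cb() can map jfr->jfr_cfg.jfr_context back to it and queue async events on async_event_list. */ +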
INIT_LIST_HEAD(&jfr_uobj->async_event_list); + cfg.jfr_context = jfr_uobj; + + jfc_uobj = uobj_get_read(UOBJ_CLASS_JFC, arg.in.jfc_handle, file); + if (IS_ERR_OR_NULL(jfc_uobj)) { + uburma_log_err("failed to find jfc, jfc_handle:%llu.\n", + arg.in.jfc_handle); + ret = -EINVAL; + goto err_alloc_abort; + } + cfg.jfc = jfc_uobj->object; + + jfr = ubcore_create_jfr(ubc_dev, &cfg, uburma_jfr_event_cb, &udata); + if (IS_ERR_OR_NULL(jfr)) { + uburma_log_err("create jfr or get jfr_id failed.\n"); + ret = PTR_ERR(jfr); + goto err_put_jfc; + } + jfr_uobj->uobj.object = jfr; + jfr->urma_jfr = arg.in.urma_jfr; + + /* Do not release jfae fd until jfr is destroyed */ + ret = uburma_get_jfae(file); + if (ret != 0) + goto err_delete_jfr; + + arg.out.id = jfr->jfr_id.id; + arg.out.depth = jfr->jfr_cfg.depth; + arg.out.max_sge = jfr->jfr_cfg.max_sge; + arg.out.handle = jfr_uobj->uobj.id; + + ret = uburma_tlv_append(hdr, (void *)&arg); + if (ret != 0) + goto err_put_jfae; + + uobj_put_read(jfc_uobj); + uobj_alloc_commit(&jfr_uobj->uobj); + return ret; + +err_put_jfae: + uburma_put_jfae(file); +err_delete_jfr: + (void)ubcore_delete_jfr(jfr); +err_put_jfc: + uobj_put_read(jfc_uobj); +err_alloc_abort: + uobj_alloc_abort(&jfr_uobj->uobj); + return ret; +} + +static int uburma_cmd_modify_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_modify_jfr arg; + struct uburma_uobj *uobj; + struct ubcore_jfr_attr attr = { 0 }; + struct ubcore_udata udata; + struct ubcore_jfr *jfr; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + attr.mask = arg.in.mask; + attr.rx_threshold = arg.in.rx_threshold; + attr.state = (enum ubcore_jfr_state)arg.in.state; + fill_udata(&udata, file->ucontext, &arg.udata); + + uobj = uobj_get_write(UOBJ_CLASS_JFR, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find jfr.\n"); + return -EINVAL; + } + + jfr = (struct ubcore_jfr *)uobj->object; + ret = ubcore_modify_jfr(jfr, &attr, &udata); + if (ret != 0) { + uobj_put_write(uobj); + uburma_log_err("modify jfr failed, ret:%d.\n", ret); + return ret; + } + + ret = uburma_tlv_append(hdr, (void *)&arg); + uobj_put_write(uobj); + return ret; +} + +static int uburma_cmd_query_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_query_jfr arg; + struct ubcore_jfr_attr attr = { 0 }; + struct ubcore_jfr_cfg cfg = { 0 }; + struct uburma_uobj *uobj; + struct ubcore_jfr *jfr; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + uobj = uobj_get_read(UOBJ_CLASS_JFR, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find jfr.\n"); + return -EINVAL; + } + + jfr = (struct ubcore_jfr *)uobj->object; + ret = ubcore_query_jfr(jfr, &cfg, &attr); + if (ret != 0) { + uobj_put_read(uobj); + uburma_log_err("query jfr failed, ret:%d.\n", ret); + return ret; + } + + arg.out.depth = cfg.depth; + arg.out.flag = cfg.flag.value; + arg.out.trans_mode = (uint32_t)cfg.trans_mode; + arg.out.max_sge = cfg.max_sge; + arg.out.min_rnr_timer = cfg.min_rnr_timer; + arg.out.token = cfg.token_value.token; + arg.out.id = cfg.id; + + arg.out.rx_threshold = attr.rx_threshold; + arg.out.state = (uint32_t)attr.state; + + ret = uburma_tlv_append(hdr, (void *)&arg); + uobj_put_read(uobj); + return ret; +} + +static int uburma_cmd_delete_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + 
struct uburma_cmd_delete_jfr arg; + struct uburma_jfr_uobj *jfr_uobj; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_JFR, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find jfr"); + return -EINVAL; + } + + /* To get async_events_reported after obj removed. */ + uobj_get(uobj); + jfr_uobj = container_of(uobj, struct uburma_jfr_uobj, uobj); + + ret = uobj_remove_commit(uobj); + if (ret != 0) { + uburma_log_err("delete jfr failed, ret:%d.\n", ret); + uobj_put(uobj); + uobj_put_del(uobj); + return ret; + } + + arg.out.async_events_reported = jfr_uobj->async_events_reported; + uobj_put(uobj); + uobj_put_del(uobj); + return uburma_tlv_append(hdr, (void *)&arg); +} + +static int uburma_cmd_delete_jfr_batch(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_jfr_uobj *jfr_uobj = NULL; + struct uburma_cmd_delete_jfr_batch arg; + struct uburma_uobj **uobj_arr = NULL; + uint32_t async_events_reported = 0; + struct uburma_uobj *uobj = NULL; + uint32_t bad_jfr_index = 0; + uint64_t *jfr_arr = NULL; + uint32_t arr_num; + int ret_bad; + int ret; + int i; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + arr_num = arg.in.jfr_num; + jfr_arr = kcalloc(arr_num, sizeof(uint64_t), GFP_KERNEL); + if (jfr_arr == NULL) + return -ENOMEM; + + ret = uburma_copy_from_user((void *)jfr_arr, + (void __user *)arg.in.jfr_ptr, + arr_num * sizeof(uint64_t)); + if (ret != 0) + goto free_jfr_arr; + + uobj_arr = kcalloc(arr_num, sizeof(struct uburma_uobj *), GFP_KERNEL); + if (uobj_arr == NULL) { + ret = -ENOMEM; + goto free_jfr_arr; + } + + for (i = 0; i < arr_num; ++i) { + uobj = uobj_get_del(UOBJ_CLASS_JFR, jfr_arr[i], file); + uobj_arr[i] = uobj; + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jfr, index is %d.\n", i); + ret = -EINVAL; + goto free_uobj_arr; + } + /* To get events_reported after obj removed. 
*/ + uobj_get(uobj); + jfr_uobj = container_of(uobj, struct uburma_jfr_uobj, uobj); + async_events_reported += jfr_uobj->async_events_reported; + } + + ret = uobj_remove_commit_batch(uobj_arr, arr_num, &bad_jfr_index); + if (ret != 0) + uburma_log_err("delete jfr failed, ret:%d.\n", ret); + + arg.out.async_events_reported = async_events_reported; + arg.out.bad_jfr_index = bad_jfr_index; + + uobj_put_batch(uobj_arr, arr_num); + uobj_put_del_batch(uobj_arr, arr_num); + + ret_bad = uburma_tlv_append(hdr, (void *)&arg); + if (ret_bad != 0) { + uburma_log_err("uburma tlv append error, ret:%d.\n", ret_bad); + ret = ret_bad; + } + +free_uobj_arr: + kfree(uobj_arr); +free_jfr_arr: + kfree(jfr_arr); + return ret; +} + +static int uburma_cmd_create_jfc(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_jfc arg; + struct uburma_jfc_uobj *jfc_uobj; + struct uburma_jfce_uobj *jfce; + struct ubcore_jfc_cfg cfg = { 0 }; + struct ubcore_udata udata; + struct ubcore_jfc *jfc; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + cfg.depth = arg.in.depth; + cfg.flag.value = arg.in.flag; + cfg.ceqn = arg.in.ceqn; + + /* jfce may be ERR_PTR */ + jfce = uburma_get_jfce_uobj(arg.in.jfce_fd, file); + if (arg.in.jfce_fd >= 0 && IS_ERR_OR_NULL(jfce)) { + uburma_log_err("Failed to get jfce.\n"); + return -EINVAL; + } + + fill_udata(&udata, file->ucontext, &arg.udata); + + jfc_uobj = (struct uburma_jfc_uobj *)uobj_alloc(UOBJ_CLASS_JFC, file); + if (IS_ERR_OR_NULL(jfc_uobj)) { + uburma_log_err("UOBJ_CLASS_JFC alloc fail!\n"); + ret = -1; + goto err_put_jfce; + } + jfc_uobj->comp_events_reported = 0; + jfc_uobj->async_events_reported = 0; + INIT_LIST_HEAD(&jfc_uobj->comp_event_list); + INIT_LIST_HEAD(&jfc_uobj->async_event_list); + cfg.jfc_context = jfc_uobj; + + jfc = ubcore_create_jfc(ubc_dev, &cfg, uburma_jfce_handler, + uburma_jfc_event_cb, &udata); + if (IS_ERR_OR_NULL(jfc)) { + uburma_log_err("create jfc or get jfc_id failed.\n"); + ret = PTR_ERR(jfc); + goto err_alloc_abort; + } + + jfc_uobj->jfce = (struct uburma_uobj *)jfce; + jfc_uobj->uobj.object = jfc; + jfc->urma_jfc = arg.in.urma_jfc; + + /* Do not release jfae fd until jfc is destroyed */ + ret = uburma_get_jfae(file); + if (ret != 0) + goto err_delete_jfc; + + arg.out.id = jfc->id; + arg.out.depth = jfc->jfc_cfg.depth; + arg.out.handle = jfc_uobj->uobj.id; + ret = uburma_tlv_append(hdr, (void *)&arg); + if (ret != 0) + goto err_put_jfae; + + uobj_alloc_commit(&jfc_uobj->uobj); + return 0; + +err_put_jfae: + uburma_put_jfae(file); +err_delete_jfc: + (void)ubcore_delete_jfc(jfc); +err_alloc_abort: + uobj_alloc_abort(&jfc_uobj->uobj); +err_put_jfce: + if (!IS_ERR_OR_NULL(jfce)) + uobj_put(&jfce->uobj); + return ret; +} + +static int uburma_cmd_modify_jfc(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_modify_jfc arg; + struct uburma_uobj *uobj; + struct ubcore_jfc_attr attr = { 0 }; + struct ubcore_udata udata; + struct ubcore_jfc *jfc; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + attr.mask = arg.in.mask; + attr.moderate_count = arg.in.moderate_count; + attr.moderate_period = arg.in.moderate_period; + fill_udata(&udata, file->ucontext, &arg.udata); + + uobj = uobj_get_write(UOBJ_CLASS_JFC, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find jfc.\n"); + return -EINVAL; + } + + jfc = (struct ubcore_jfc 
*)uobj->object; + ret = ubcore_modify_jfc(jfc, &attr, &udata); + if (ret != 0) { + uobj_put_write(uobj); + uburma_log_err("modify jfc failed, ret:%d.\n", ret); + return ret; + } + + ret = uburma_tlv_append(hdr, (void *)&arg); + uobj_put_write(uobj); + return ret; +} + +static int uburma_cmd_delete_jfc(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_delete_jfc arg; + struct uburma_uobj *uobj; + struct uburma_jfc_uobj *jfc_uobj; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_JFC, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find jfc.\n"); + return -EINVAL; + } + + /* To get events_reported after obj removed. */ + uobj_get(uobj); + jfc_uobj = container_of(uobj, struct uburma_jfc_uobj, uobj); + + ret = uobj_remove_commit(uobj); + if (ret != 0) { + uburma_log_err("delete jfc failed, ret:%d.\n", ret); + uobj_put(uobj); + uobj_put_del(uobj); + return ret; + } + + arg.out.comp_events_reported = jfc_uobj->comp_events_reported; + arg.out.async_events_reported = jfc_uobj->async_events_reported; + uobj_put(uobj); + uobj_put_del(uobj); + return uburma_tlv_append(hdr, (void *)&arg); +} + +static int uburma_cmd_delete_jfc_batch(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_jfc_uobj *jfc_uobj = NULL; + struct uburma_cmd_delete_jfc_batch arg; + struct uburma_uobj **uobj_arr = NULL; + uint32_t async_events_reported = 0; + uint32_t comp_events_reported = 0; + struct uburma_uobj *uobj = NULL; + uint32_t bad_jfc_index = 0; + uint64_t *jfc_arr = NULL; + uint32_t arr_num; + int ret_bad; + int ret; + int i; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + arr_num = arg.in.jfc_num; + jfc_arr = kcalloc(arr_num, sizeof(uint64_t), GFP_KERNEL); + if (jfc_arr == NULL) + return -ENOMEM; + + ret = uburma_copy_from_user((void *)jfc_arr, + (void __user *)arg.in.jfc_ptr, + arr_num * sizeof(uint64_t)); + if (ret != 0) + goto free_jfc_arr; + + uobj_arr = kcalloc(arr_num, sizeof(struct uburma_uobj *), GFP_KERNEL); + if (uobj_arr == NULL) { + ret = -ENOMEM; + goto free_jfc_arr; + } + + for (i = 0; i < arr_num; ++i) { + uobj = uobj_get_del(UOBJ_CLASS_JFC, jfc_arr[i], file); + uobj_arr[i] = uobj; + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jfc, index is %d.\n", i); + ret = -EINVAL; + goto free_uobj_arr; + } + /* To get events_reported after obj removed. 
*/ + uobj_get(uobj); + jfc_uobj = container_of(uobj, struct uburma_jfc_uobj, uobj); + comp_events_reported += jfc_uobj->comp_events_reported; + async_events_reported += jfc_uobj->async_events_reported; + } + + ret = uobj_remove_commit_batch(uobj_arr, arr_num, &bad_jfc_index); + if (ret != 0) + uburma_log_err("delete jfc failed, ret:%d.\n", ret); + + uobj_put_batch(uobj_arr, arr_num); + uobj_put_del_batch(uobj_arr, arr_num); + + arg.out.comp_events_reported = comp_events_reported; + arg.out.async_events_reported = async_events_reported; + arg.out.bad_jfc_index = bad_jfc_index; + + ret_bad = uburma_tlv_append(hdr, (void *)&arg); + if (ret_bad != 0) { + uburma_log_err("uburma tlv append error, ret:%d.\n", ret_bad); + ret = ret_bad; + } + +free_uobj_arr: + kfree(uobj_arr); +free_jfc_arr: + kfree(jfc_arr); + return ret; +} + +static void fill_create_jetty_attr(struct ubcore_jetty_cfg *cfg, + struct uburma_cmd_create_jetty *arg) +{ + cfg->id = arg->in.id; + cfg->jfs_depth = arg->in.jfs_depth; + cfg->jfr_depth = arg->in.jfr_depth; + cfg->flag.bs.share_jfr = arg->in.jetty_flag & + 0x1; // see urma_jetty_flag + cfg->flag.bs.lock_free = + ((union ubcore_jfs_flag)arg->in.jfs_flag).bs.lock_free; + cfg->flag.bs.error_suspend = + ((union ubcore_jfs_flag)arg->in.jfs_flag).bs.error_suspend; + cfg->flag.bs.outorder_comp = + ((union ubcore_jfs_flag)arg->in.jfs_flag).bs.outorder_comp; + cfg->flag.bs.order_type = + ((union ubcore_jfs_flag)arg->in.jfs_flag).bs.order_type; + // see urma_jfs_flag + + cfg->max_send_sge = arg->in.max_send_sge; + cfg->max_send_rsge = arg->in.max_send_rsge; + cfg->max_recv_sge = arg->in.max_recv_sge; + cfg->max_inline_data = arg->in.max_inline_data; + cfg->priority = arg->in.priority; + cfg->rnr_retry = arg->in.rnr_retry; + cfg->err_timeout = arg->in.err_timeout; + cfg->min_rnr_timer = arg->in.min_rnr_timer; + cfg->trans_mode = arg->in.trans_mode; +} + +static void fill_create_jetty_out(struct uburma_cmd_create_jetty *arg, + struct ubcore_jetty *jetty) +{ + arg->out.id = jetty->jetty_id.id; + arg->out.jfs_depth = jetty->jetty_cfg.jfs_depth; + arg->out.jfr_depth = jetty->jetty_cfg.jfr_depth; + arg->out.max_send_sge = jetty->jetty_cfg.max_send_sge; + arg->out.max_send_rsge = jetty->jetty_cfg.max_send_rsge; + arg->out.max_recv_sge = jetty->jetty_cfg.max_recv_sge; + arg->out.max_inline_data = jetty->jetty_cfg.max_inline_data; +} + +static int uburma_cmd_create_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_jetty arg = { 0 }; + struct uburma_uobj *send_jfc_uobj = ERR_PTR(-ENOENT); + struct uburma_uobj *recv_jfc_uobj = ERR_PTR(-ENOENT); + struct uburma_uobj *jfr_uobj = ERR_PTR(-ENOENT); + struct uburma_uobj *jetty_grp_uobj = ERR_PTR(-ENOENT); + struct ubcore_jetty_cfg cfg = { 0 }; + struct uburma_jetty_uobj *jetty_uobj; + struct ubcore_udata udata; + struct ubcore_jetty *jetty; + int ret = 0; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + jetty_uobj = + (struct uburma_jetty_uobj *)uobj_alloc(UOBJ_CLASS_JETTY, file); + if (IS_ERR_OR_NULL(jetty_uobj)) { + uburma_log_err("UOBJ_CLASS_JETTY alloc fail!\n"); + return -ENOMEM; + } + jetty_uobj->async_events_reported = 0; + INIT_LIST_HEAD(&jetty_uobj->async_event_list); + cfg.jetty_context = jetty_uobj; + + fill_create_jetty_attr(&cfg, &arg); + cfg.eid_index = file->ucontext->eid_index; + send_jfc_uobj = + uobj_get_read(UOBJ_CLASS_JFC, arg.in.send_jfc_handle, file); + recv_jfc_uobj = + uobj_get_read(UOBJ_CLASS_JFC, arg.in.recv_jfc_handle, 
file); + if (IS_ERR_OR_NULL(send_jfc_uobj) || IS_ERR_OR_NULL(recv_jfc_uobj)) { + uburma_log_err("failed to find send %llu or recv jfc %llu.\n", + arg.in.send_jfc_handle, arg.in.recv_jfc_handle); + ret = -EINVAL; + goto err_put; + } + cfg.send_jfc = send_jfc_uobj->object; + cfg.recv_jfc = recv_jfc_uobj->object; + if (arg.in.jfr_handle != 0) { + jfr_uobj = + uobj_get_read(UOBJ_CLASS_JFR, arg.in.jfr_handle, file); + if (IS_ERR_OR_NULL(jfr_uobj)) { + uburma_log_err("failed to find jfr, jfr_handle:%llu.\n", + arg.in.jfr_handle); + ret = -EINVAL; + goto err_put; + } + cfg.jfr = jfr_uobj->object; + cfg.flag.bs.share_jfr = 1; + } + if (arg.in.is_jetty_grp != 0) { + jetty_grp_uobj = uobj_get_read(UOBJ_CLASS_JETTY_GRP, + arg.in.jetty_grp_handle, file); + if (IS_ERR_OR_NULL(jetty_grp_uobj)) { + uburma_log_err( + "failed to find jetty_grp, jetty_grp_handle:%llu.\n", + arg.in.jetty_grp_handle); + ret = -EINVAL; + goto err_put; + } + cfg.jetty_grp = + (struct ubcore_jetty_group *)jetty_grp_uobj->object; + } + cfg.token_value.token = arg.in.token; + fill_udata(&udata, file->ucontext, &arg.udata); + + jetty = ubcore_create_jetty(ubc_dev, &cfg, uburma_jetty_event_cb, + &udata); + if (IS_ERR_OR_NULL(jetty)) { + uburma_log_err("create jetty or get jetty_id failed.\n"); + ret = PTR_ERR(jetty); + goto err_put; + } + + jetty_uobj->uobj.object = jetty; + jetty->urma_jetty = arg.in.urma_jetty; + /* Do not release jfae fd until jetty is destroyed */ + ret = uburma_get_jfae(file); + if (ret != 0) + goto err_delete_jetty; + + fill_create_jetty_out(&arg, jetty); + arg.out.handle = jetty_uobj->uobj.id; + + ret = uburma_tlv_append(hdr, &arg); + if (ret != 0) + goto err_put_jfae; + + if (cfg.jetty_grp) + uobj_put_read(jetty_grp_uobj); + if (cfg.jfr) + uobj_put_read(jfr_uobj); + uobj_put_read(send_jfc_uobj); + uobj_put_read(recv_jfc_uobj); + uobj_alloc_commit(&jetty_uobj->uobj); + return 0; + +err_put_jfae: + uburma_put_jfae(file); +err_delete_jetty: + (void)ubcore_delete_jetty(jetty); +err_put: + if (!IS_ERR_OR_NULL(jetty_grp_uobj)) + uobj_put_read(jetty_grp_uobj); + if (!IS_ERR_OR_NULL(jfr_uobj)) + uobj_put_read(jfr_uobj); + if (!IS_ERR_OR_NULL(recv_jfc_uobj)) + uobj_put_read(recv_jfc_uobj); + if (!IS_ERR_OR_NULL(send_jfc_uobj)) + uobj_put_read(send_jfc_uobj); + uobj_alloc_abort(&jetty_uobj->uobj); + return ret; +} + +static int uburma_cmd_modify_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_modify_jetty arg = { 0 }; + struct uburma_uobj *uobj; + struct ubcore_jetty_attr attr = { 0 }; + struct ubcore_jetty *jetty; + struct ubcore_udata udata; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + attr.mask = arg.in.mask; + attr.rx_threshold = arg.in.rx_threshold; + attr.state = (enum ubcore_jetty_state)arg.in.state; + fill_udata(&udata, file->ucontext, &arg.udata); + + uobj = uobj_get_write(UOBJ_CLASS_JETTY, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find jetty.\n"); + return -EINVAL; + } + + jetty = (struct ubcore_jetty *)uobj->object; + ret = ubcore_modify_jetty(jetty, &attr, &udata); + if (ret != 0) { + uobj_put_write(uobj); + uburma_log_err("modify jetty failed, ret:%d.\n", ret); + return ret; + } + + ret = uburma_tlv_append(hdr, &arg); + uobj_put_write(uobj); + return ret; +} + +static int uburma_cmd_query_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_query_jetty arg = { 0 }; + struct ubcore_jetty_attr attr 
= { 0 }; + struct ubcore_jetty_cfg cfg = { 0 }; + struct uburma_uobj *uobj; + struct ubcore_jetty *jetty; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + uobj = uobj_get_read(UOBJ_CLASS_JETTY, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find jetty.\n"); + return -EINVAL; + } + + jetty = (struct ubcore_jetty *)uobj->object; + ret = ubcore_query_jetty(jetty, &cfg, &attr); + if (ret != 0) { + uobj_put_read(uobj); + uburma_log_err("query jetty failed, ret:%d.\n", ret); + return ret; + } + + arg.out.id = cfg.id; + arg.out.jetty_flag = cfg.flag.value; + + arg.out.jfs_depth = cfg.jfs_depth; + arg.out.jfs_flag = 0; // todo + arg.out.trans_mode = (uint32_t)cfg.trans_mode; + arg.out.priority = cfg.priority; + arg.out.max_send_sge = cfg.max_send_sge; + arg.out.max_send_rsge = cfg.max_send_rsge; + arg.out.max_inline_data = cfg.max_inline_data; + arg.out.rnr_retry = cfg.rnr_retry; + arg.out.err_timeout = cfg.err_timeout; + + if (cfg.flag.bs.share_jfr == 1) { + arg.out.jfr_depth = cfg.jfr_depth; + arg.out.jfr_flag = 0; // todo + arg.out.max_recv_sge = cfg.max_recv_sge; + arg.out.min_rnr_timer = cfg.min_rnr_timer; + arg.out.token = cfg.token_value.token; + arg.out.jfr_id = 0; // todo + } + + arg.out.rx_threshold = attr.rx_threshold; + arg.out.state = (uint32_t)attr.state; + ret = uburma_tlv_append(hdr, &arg); + uobj_put_read(uobj); + return ret; +} + +static int uburma_cmd_delete_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_delete_jetty arg; + struct uburma_jetty_uobj *jetty_uobj; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_JETTY, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find jetty"); + return -EINVAL; + } + + /* To get async_events_reported after obj removed. 
*/ + uobj_get(uobj); + jetty_uobj = container_of(uobj, struct uburma_jetty_uobj, uobj); + + ret = uobj_remove_commit(uobj); + if (ret != 0) { + uburma_log_err("delete jetty failed, ret:%d.\n", ret); + uobj_put(uobj); + uobj_put_del(uobj); + return ret; + } + + arg.out.async_events_reported = jetty_uobj->async_events_reported; + uobj_put(uobj); + uobj_put_del(uobj); + return uburma_tlv_append(hdr, &arg); +} + +static int uburma_cmd_delete_jetty_batch(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_jetty_uobj *jetty_uobj = NULL; + struct uburma_cmd_delete_jetty_batch arg; + struct uburma_uobj **uobj_arr = NULL; + uint32_t async_events_reported = 0; + struct uburma_uobj *uobj = NULL; + uint32_t bad_jetty_index = 0; + uint64_t *jetty_arr = NULL; + uint32_t arr_num; + int ret_bad; + int ret; + int i; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + arr_num = arg.in.jetty_num; + jetty_arr = kcalloc(arr_num, sizeof(uint64_t), GFP_KERNEL); + if (jetty_arr == NULL) + return -ENOMEM; + + ret = uburma_copy_from_user((void *)jetty_arr, + (void __user *)arg.in.jetty_ptr, + arr_num * sizeof(uint64_t)); + if (ret != 0) + goto free_jetty_arr; + + uobj_arr = kcalloc(arr_num, sizeof(struct uburma_uobj *), GFP_KERNEL); + if (uobj_arr == NULL) { + ret = -ENOMEM; + goto free_jetty_arr; + } + + for (i = 0; i < arr_num; ++i) { + uobj = uobj_get_del(UOBJ_CLASS_JETTY, jetty_arr[i], file); + uobj_arr[i] = uobj; + if (IS_ERR(uobj)) { + uburma_log_err("failed to find jetty, index is %d.\n", + i); + ret = -EINVAL; + goto free_uobj_arr; + } + /* To get events_reported after obj removed. */ + uobj_get(uobj); + jetty_uobj = container_of(uobj, struct uburma_jetty_uobj, uobj); + async_events_reported += jetty_uobj->async_events_reported; + } + + ret = uobj_remove_commit_batch(uobj_arr, arr_num, &bad_jetty_index); + if (ret != 0) + uburma_log_err("delete jetty failed, ret:%d.\n", ret); + + arg.out.async_events_reported = async_events_reported; + arg.out.bad_jetty_index = bad_jetty_index; + + uobj_put_batch(uobj_arr, arr_num); + uobj_put_del_batch(uobj_arr, arr_num); + + ret_bad = uburma_tlv_append(hdr, (void *)&arg); + if (ret_bad != 0) { + uburma_log_err("uburma tlv append error, ret:%d.\n", ret_bad); + ret = ret_bad; + } + +free_uobj_arr: + kfree(uobj_arr); +free_jetty_arr: + kfree(jetty_arr); + return ret; +} + +static int uburma_cmd_create_jfce(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_jfce arg; + struct uburma_jfce_uobj *jfce; + struct uburma_uobj *uobj; + + uobj = uobj_alloc(UOBJ_CLASS_JFCE, file); + if (IS_ERR_OR_NULL(uobj)) + return PTR_ERR(uobj); + + jfce = container_of(uobj, struct uburma_jfce_uobj, uobj); + uburma_init_jfe(&jfce->jfe); + + arg.out.fd = uobj->id; /* should get fd before commit uobj */ + uobj_alloc_commit(uobj); + + return uburma_tlv_append(hdr, (void *)&arg); +} + +static int uburma_cmd_import_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_import_jfr arg; + struct ubcore_tjetty_cfg cfg = { 0 }; + struct ubcore_udata udata; + struct ubcore_tjetty *tjfr; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + uobj = uobj_alloc(UOBJ_CLASS_TARGET_JFR, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("UOBJ_CLASS_TARGET_JFR alloc fail!\n"); + return -ENOMEM; + } + + (void)memcpy(cfg.id.eid.raw, 
arg.in.eid, UBCORE_EID_SIZE); + cfg.id.id = arg.in.id; + cfg.flag.value = arg.in.flag; + cfg.token_value.token = arg.in.token; + cfg.trans_mode = arg.in.trans_mode; + cfg.eid_index = file->ucontext->eid_index; + fill_udata(&udata, file->ucontext, &arg.udata); + + tjfr = ubcore_import_jfr(ubc_dev, &cfg, &udata); + if (IS_ERR_OR_NULL(tjfr)) { + uburma_log_err("ubcore_import_jfr failed.\n"); + uobj_alloc_abort(uobj); + return PTR_ERR(tjfr); + } + + uobj->object = tjfr; + arg.out.handle = (uint64_t)uobj->id; + if (tjfr->vtpn != NULL) + arg.out.tpn = tjfr->vtpn->vtpn; + else if (tjfr->tp != NULL) + arg.out.tpn = tjfr->tp->tpn; + else + arg.out.tpn = UBURMA_INVALID_TPN; + + ret = uburma_tlv_append(hdr, (void *)&arg); + if (ret != 0) { + ubcore_unimport_jfr(tjfr); + uobj_alloc_abort(uobj); + return ret; + } + uobj_alloc_commit(uobj); + return 0; +} + +static int uburma_cmd_import_jfr_ex(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct ubcore_active_tp_cfg active_tp_cfg = { 0 }; + struct uburma_cmd_import_jfr_ex arg = { 0 }; + struct ubcore_tjetty_cfg cfg = { 0 }; + struct ubcore_udata udata = { 0 }; + struct ubcore_tjetty *tjfr; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, (void *)&arg); + if (ret != 0) + return ret; + + uobj = uobj_alloc(UOBJ_CLASS_TARGET_JFR, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("UOBJ_CLASS_TARGET_JFR alloc fail!\n"); + return -ENOMEM; + } + + (void)memcpy(cfg.id.eid.raw, arg.in.eid, UBCORE_EID_SIZE); + cfg.id.id = arg.in.id; + cfg.flag.value = arg.in.flag; + cfg.token_value.token = arg.in.token; + cfg.trans_mode = arg.in.trans_mode; + cfg.eid_index = file->ucontext->eid_index; + + active_tp_cfg.tp_handle.value = arg.in.tp_handle; + active_tp_cfg.peer_tp_handle.value = arg.in.peer_tp_handle; + active_tp_cfg.tag = arg.in.tag; + active_tp_cfg.tp_attr.tx_psn = arg.in.tx_psn; + active_tp_cfg.tp_attr.rx_psn = arg.in.rx_psn; + fill_udata(&udata, file->ucontext, &arg.udata); + + tjfr = ubcore_import_jfr_ex(ubc_dev, &cfg, &active_tp_cfg, &udata); + if (IS_ERR_OR_NULL(tjfr)) { + uburma_log_err("ubcore_import_jfr failed.\n"); + uobj_alloc_abort(uobj); + return PTR_ERR(tjfr); + } + + uobj->object = tjfr; + arg.out.handle = (uint64_t)uobj->id; + if (tjfr->vtpn != NULL) + arg.out.tpn = tjfr->vtpn->vtpn; + else if (tjfr->tp != NULL) + arg.out.tpn = tjfr->tp->tpn; + else + arg.out.tpn = UBURMA_INVALID_TPN; + + ret = uburma_tlv_append(hdr, (void *)&arg); + if (ret != 0) { + ubcore_unimport_jfr(tjfr); + uobj_alloc_abort(uobj); + return ret; + } + uobj_alloc_commit(uobj); + return 0; +} + +static int uburma_cmd_unimport_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unimport_jfr arg; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_TARGET_JFR, arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find tjfr"); + return -EINVAL; + } + ret = uobj_remove_commit(uobj); + if (ret != 0) + uburma_log_err("ubcore_unimport_jfr failed.\n"); + + uobj_put_del(uobj); + return ret; +} + +static int uburma_cmd_import_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_import_jetty arg; + struct ubcore_tjetty_cfg cfg = { 0 }; + struct ubcore_tjetty *tjetty; + struct ubcore_udata udata; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if 
(ret != 0) + return ret; + + uobj = uobj_alloc(UOBJ_CLASS_TARGET_JETTY, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("UOBJ_CLASS_TARGET_JETTY alloc fail!\n"); + return -ENOMEM; + } + + (void)memcpy(cfg.id.eid.raw, arg.in.eid, UBCORE_EID_SIZE); + cfg.id.id = arg.in.id; + cfg.token_value.token = arg.in.token; + cfg.trans_mode = (enum ubcore_transport_mode)arg.in.trans_mode; + cfg.flag.value = arg.in.flag; + cfg.policy = (enum ubcore_jetty_grp_policy)arg.in.policy; + cfg.type = (enum ubcore_target_type)arg.in.type; + cfg.eid_index = file->ucontext->eid_index; + fill_udata(&udata, file->ucontext, &arg.udata); + + tjetty = ubcore_import_jetty(ubc_dev, &cfg, &udata); + if (IS_ERR_OR_NULL(tjetty)) { + uburma_log_err("ubcore_import_jetty failed.\n"); + uobj_alloc_abort(uobj); + return PTR_ERR(tjetty); + } + + uobj->object = tjetty; + arg.out.handle = (uint64_t)uobj->id; + if (tjetty->vtpn != NULL) + arg.out.tpn = tjetty->vtpn->vtpn; + else if (tjetty->tp != NULL) + arg.out.tpn = tjetty->tp->tpn; + else + arg.out.tpn = UBURMA_INVALID_TPN; + + ret = uburma_tlv_append(hdr, &arg); + if (ret != 0) { + (void)ubcore_unimport_jetty(tjetty); + uobj_alloc_abort(uobj); + return ret; + } + uobj_alloc_commit(uobj); + return 0; +} + +int uburma_unimport_jetty(struct uburma_file *file, bool async, + int tjetty_handle) +{ + struct uburma_tjetty_uobj *tjetty_uobj; + struct uburma_uobj *uobj; + int ret; + + uobj = uobj_get_del(UOBJ_CLASS_TARGET_JETTY, tjetty_handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find tjetty"); + return -EINVAL; + } + + uobj_get(uobj); + tjetty_uobj = container_of(uobj, struct uburma_tjetty_uobj, uobj); + tjetty_uobj->should_unimport_async = async; + ret = uobj_remove_commit(uobj); + if (ret != 0) + uburma_log_err("ubcore_unimport_jetty_async failed.\n"); + + uobj_put(uobj); + uobj_put_del(uobj); + return ret; +} + +static int uburma_cmd_unimport_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unimport_jetty arg; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + return uburma_unimport_jetty(file, false, arg.in.handle); +} + +static int uburma_get_jetty_tjetty_objs(struct uburma_file *file, + uint64_t jetty_handle, + uint64_t tjetty_handle, + struct uburma_uobj **jetty_uobj, + struct uburma_uobj **tjetty_uobj) +{ + *jetty_uobj = uobj_get_read(UOBJ_CLASS_JETTY, jetty_handle, file); + if (IS_ERR_OR_NULL(*jetty_uobj)) { + uburma_log_err("failed to find jetty with handle %llu", + jetty_handle); + return -EINVAL; + } + + *tjetty_uobj = + uobj_get_read(UOBJ_CLASS_TARGET_JETTY, tjetty_handle, file); + if (IS_ERR_OR_NULL(*tjetty_uobj)) { + uobj_put_read(*jetty_uobj); + uburma_log_err("failed to find target jetty with handle %llu", + tjetty_handle); + return -EINVAL; + } + return 0; +} + +static inline void uburma_put_jetty_tjetty_objs(struct uburma_uobj *jetty_uobj, + struct uburma_uobj *tjetty_uobj) +{ + uobj_put_read(jetty_uobj); + uobj_put_read(tjetty_uobj); +} + +static int uburma_get_jfs_tjfr_objs(struct uburma_file *file, + uint64_t jetty_handle, + uint64_t tjetty_handle, + struct uburma_uobj **jetty_uobj, + struct uburma_uobj **tjetty_uobj) +{ + *jetty_uobj = uobj_get_read(UOBJ_CLASS_JFS, jetty_handle, file); + if (IS_ERR_OR_NULL(*jetty_uobj)) { + uburma_log_err("failed to find jfs with handle %llu", + jetty_handle); + return -EINVAL; + } + + *tjetty_uobj = + uobj_get_read(UOBJ_CLASS_TARGET_JFR, tjetty_handle, file); + if (IS_ERR_OR_NULL(*tjetty_uobj)) { + 
uobj_put_read(*jetty_uobj); + uburma_log_err("failed to find target jfr with handle %llu", + tjetty_handle); + return -EINVAL; + } + return 0; +} + +static inline void uburma_put_jfs_tjfr_objs(struct uburma_uobj *jetty_uobj, + struct uburma_uobj *tjetty_uobj) +{ + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); +} + +static int uburma_cmd_advise_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_advise_jetty arg; + struct uburma_uobj *tjfr_uobj; + struct uburma_uobj *jfs_uobj; + struct ubcore_udata udata; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + if (uburma_get_jfs_tjfr_objs(file, arg.in.jetty_handle, + arg.in.tjetty_handle, &jfs_uobj, + &tjfr_uobj)) + return -EINVAL; + + fill_udata(&udata, file->ucontext, &arg.udata); + + ret = ubcore_advise_jfr(jfs_uobj->object, tjfr_uobj->object, &udata); + if (ret != 0) + uburma_log_err("advise jfr failed.\n"); + + uburma_put_jfs_tjfr_objs(jfs_uobj, tjfr_uobj); + return ret; +} + +static int uburma_cmd_unadvise_jfr(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unadvise_jetty arg; + struct uburma_uobj *tjfr_uobj; + struct uburma_uobj *jfs_uobj; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + if (uburma_get_jfs_tjfr_objs(file, arg.in.jetty_handle, + arg.in.tjetty_handle, &jfs_uobj, + &tjfr_uobj)) + return -EINVAL; + + ret = ubcore_unadvise_jfr(jfs_uobj->object, tjfr_uobj->object); + if (ret != 0) + uburma_log_err("failed to unadvise jfr.\n"); + + uburma_put_jfs_tjfr_objs(jfs_uobj, tjfr_uobj); + return ret; +} + +static int uburma_cmd_advise_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_advise_jetty arg; + struct uburma_uobj *tjetty_uobj; + struct uburma_uobj *jetty_uobj; + struct ubcore_udata udata; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + if (uburma_get_jetty_tjetty_objs(file, arg.in.jetty_handle, + arg.in.tjetty_handle, &jetty_uobj, + &tjetty_uobj)) + return -EINVAL; + + fill_udata(&udata, file->ucontext, &arg.udata); + + ret = ubcore_advise_jetty(jetty_uobj->object, tjetty_uobj->object, + &udata); + if (ret != 0) + uburma_log_err("advise_jetty failed.\n"); + + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; +} + +static int uburma_cmd_unadvise_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unadvise_jetty arg; + struct uburma_uobj *tjetty_uobj; + struct uburma_uobj *jetty_uobj; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + if (uburma_get_jetty_tjetty_objs(file, arg.in.jetty_handle, + arg.in.tjetty_handle, &jetty_uobj, + &tjetty_uobj)) + return -EINVAL; + + ret = ubcore_unadvise_jetty(jetty_uobj->object, tjetty_uobj->object); + if (ret != 0) + uburma_log_err("failed to unadvise jetty, ret: %d.\n", ret); + + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; +} + +static int uburma_cmd_bind_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_tjetty_uobj *uburma_tjetty; + struct uburma_cmd_bind_jetty arg; + struct uburma_uobj *tjetty_uobj; + struct uburma_uobj *jetty_uobj; + struct ubcore_tjetty *tjetty; + struct ubcore_udata udata; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + if 
(uburma_get_jetty_tjetty_objs(file, arg.in.jetty_handle, + arg.in.tjetty_handle, &jetty_uobj, + &tjetty_uobj)) + return -EINVAL; + + fill_udata(&udata, file->ucontext, &arg.udata); + + tjetty = (struct ubcore_tjetty *)tjetty_uobj->object; + ret = ubcore_bind_jetty(jetty_uobj->object, tjetty, &udata); + if (ret != 0) { + uburma_log_err("bind jetty failed.\n"); + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; + } + + if (tjetty->vtpn != NULL) + arg.out.tpn = tjetty->vtpn->vtpn; + else if (tjetty->tp != NULL) + arg.out.tpn = tjetty->tp->tpn; + else + arg.out.tpn = UBURMA_INVALID_TPN; + + uburma_tjetty = (struct uburma_tjetty_uobj *)(tjetty_uobj); + uburma_tjetty->jetty_uobj = (struct uburma_jetty_uobj *)jetty_uobj; + ret = uburma_tlv_append(hdr, &arg); + if (ret != 0) + (void)ubcore_unbind_jetty(jetty_uobj->object); + + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; +} + +static int uburma_cmd_bind_jetty_ex(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct ubcore_active_tp_cfg active_tp_cfg = { 0 }; + struct uburma_cmd_bind_jetty_ex arg = { 0 }; + struct uburma_tjetty_uobj *uburma_tjetty; + struct ubcore_udata udata = { 0 }; + struct uburma_uobj *tjetty_uobj; + struct uburma_uobj *jetty_uobj; + struct ubcore_tjetty *tjetty; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + if (uburma_get_jetty_tjetty_objs(file, arg.in.jetty_handle, + arg.in.tjetty_handle, &jetty_uobj, + &tjetty_uobj)) + return -EINVAL; + + active_tp_cfg.tp_handle.value = arg.in.tp_handle; + active_tp_cfg.peer_tp_handle.value = arg.in.peer_tp_handle; + active_tp_cfg.tag = arg.in.tag; + active_tp_cfg.tp_attr.tx_psn = arg.in.tx_psn; + active_tp_cfg.tp_attr.rx_psn = arg.in.rx_psn; + fill_udata(&udata, file->ucontext, &arg.udata); + + tjetty = (struct ubcore_tjetty *)tjetty_uobj->object; + ret = ubcore_bind_jetty_ex(jetty_uobj->object, tjetty, &active_tp_cfg, + &udata); + if (ret != 0) { + uburma_log_err("bind jetty failed.\n"); + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; + } + + if (tjetty->vtpn != NULL) + arg.out.tpn = tjetty->vtpn->vtpn; + else if (tjetty->tp != NULL) + arg.out.tpn = tjetty->tp->tpn; + else + arg.out.tpn = UBURMA_INVALID_TPN; + + uburma_tjetty = (struct uburma_tjetty_uobj *)(tjetty_uobj); + uburma_tjetty->jetty_uobj = (struct uburma_jetty_uobj *)jetty_uobj; + ret = uburma_tlv_append(hdr, &arg); + if (ret != 0) + (void)ubcore_unbind_jetty(jetty_uobj->object); + + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; +} + +int uburma_unbind_jetty(struct uburma_file *file, bool async, int jetty_handle, + int tjetty_handle) +{ + struct uburma_tjetty_uobj *uburma_tjetty; + struct uburma_uobj *tjetty_uobj; + struct uburma_uobj *jetty_uobj; + int ret; + + if (uburma_get_jetty_tjetty_objs(file, jetty_handle, tjetty_handle, + &jetty_uobj, &tjetty_uobj)) + return -EINVAL; + + if (async) + ret = ubcore_unbind_jetty_async(jetty_uobj->object, 0, NULL); + else + ret = ubcore_unbind_jetty(jetty_uobj->object); + if (ret != 0) + uburma_log_err("failed to unbind jetty, ret: %d.\n", ret); + + uburma_tjetty = (struct uburma_tjetty_uobj *)(tjetty_uobj); + uburma_tjetty->jetty_uobj = NULL; + + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; +} + +static int uburma_cmd_unbind_jetty(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unadvise_jetty arg; + int ret; + + ret = 
uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + return uburma_unbind_jetty(file, false, arg.in.jetty_handle, + arg.in.tjetty_handle); +} + +static int uburma_cmd_create_jetty_grp(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_jetty_grp arg; + struct uburma_jetty_grp_uobj *jetty_grp_uobj; + struct ubcore_jetty_grp_cfg cfg = { 0 }; + struct ubcore_udata udata; + struct ubcore_jetty_group *jetty_grp; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + arg.in.name[UBCORE_JETTY_GRP_MAX_NAME - 1] = '\0'; + (void)memcpy(cfg.name, arg.in.name, UBCORE_JETTY_GRP_MAX_NAME); + cfg.name[UBCORE_JETTY_GRP_MAX_NAME - 1] = '\0'; + + cfg.token_value.token = arg.in.token; + cfg.id = arg.in.id; + cfg.policy = (enum ubcore_jetty_grp_policy)arg.in.policy; + cfg.flag.value = arg.in.flag; + cfg.eid_index = file->ucontext->eid_index; + fill_udata(&udata, file->ucontext, &arg.udata); + + jetty_grp_uobj = (struct uburma_jetty_grp_uobj *)uobj_alloc( + UOBJ_CLASS_JETTY_GRP, file); + if (IS_ERR_OR_NULL(jetty_grp_uobj)) { + uburma_log_err("UOBJ_CLASS_JETTY_GRP alloc fail!\n"); + return -ENOMEM; + } + jetty_grp_uobj->async_events_reported = 0; + INIT_LIST_HEAD(&jetty_grp_uobj->async_event_list); + cfg.user_ctx = (uint64_t)jetty_grp_uobj; + + jetty_grp = ubcore_create_jetty_grp(ubc_dev, &cfg, + uburma_jetty_grp_event_cb, &udata); + if (IS_ERR_OR_NULL(jetty_grp)) { + uburma_log_err("create jetty_grp failed.\n"); + ret = PTR_ERR(jetty_grp); + goto err_alloc_abort; + } + jetty_grp_uobj->uobj.object = jetty_grp; + jetty_grp->urma_jetty_grp = arg.in.urma_jetty_grp; + + /* Do not release jfae fd until jetty_grp is destroyed */ + ret = uburma_get_jfae(file); + if (ret != 0) + goto err_delete_jetty_grp; + + arg.out.id = jetty_grp->jetty_grp_id.id; + arg.out.handle = jetty_grp_uobj->uobj.id; + + ret = uburma_tlv_append(hdr, &arg); + if (ret != 0) + goto err_put_jfae; + + uobj_alloc_commit(&jetty_grp_uobj->uobj); + return ret; + +err_put_jfae: + uburma_put_jfae(file); +err_delete_jetty_grp: + (void)ubcore_delete_jetty_grp(jetty_grp); +err_alloc_abort: + uobj_alloc_abort(&jetty_grp_uobj->uobj); + return ret; +} + +static int uburma_cmd_delete_jetty_grp(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_delete_jetty_grp arg; + struct uburma_jetty_grp_uobj *jetty_grp_uobj; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + uobj = uobj_get_del(UOBJ_CLASS_JETTY_GRP, (int)arg.in.handle, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("failed to find jetty group"); + return -EINVAL; + } + + /* To get async_events_reported after obj removed. 
*/ + uobj_get(uobj); + jetty_grp_uobj = container_of(uobj, struct uburma_jetty_grp_uobj, uobj); + + ret = uobj_remove_commit(uobj); + if (ret != 0) { + uburma_log_err("delete jetty group failed, ret:%d.\n", ret); + uobj_put(uobj); + uobj_put_del(uobj); + return ret; + } + + arg.out.async_events_reported = jetty_grp_uobj->async_events_reported; + uobj_put(uobj); + uobj_put_del(uobj); + return uburma_tlv_append(hdr, &arg); +} + +static int uburma_fill_user_ctl_info(struct ubcore_ucontext *ctx, + struct uburma_cmd_user_ctl *user_ctl, + struct ubcore_user_ctl *k_user_ctl) +{ + if (ctx == NULL) { + uburma_log_err("invalid parameter: ctx is NULL.\n"); + return -EINVAL; + } + + k_user_ctl->uctx = ctx; + k_user_ctl->in.addr = user_ctl->in.addr; + k_user_ctl->in.len = user_ctl->in.len; + k_user_ctl->in.opcode = user_ctl->in.opcode; + + k_user_ctl->out.addr = user_ctl->out.addr; + k_user_ctl->out.len = user_ctl->out.len; + + k_user_ctl->udrv_data.in_addr = user_ctl->udrv.in_addr; + k_user_ctl->udrv_data.in_len = user_ctl->udrv.in_len; + k_user_ctl->udrv_data.out_addr = user_ctl->udrv.out_addr; + k_user_ctl->udrv_data.out_len = user_ctl->udrv.out_len; + + return 0; +} + +static int uburma_fill_eid_list(struct ubcore_device *dev, + struct uburma_cmd_get_eid_list *eid_list) +{ + struct ubcore_eid_entry *e; + uint32_t max_eid_cnt = 0; + uint32_t eid_cnt = 0; + uint32_t i; + + spin_lock(&dev->eid_table.lock); + if (dev->eid_table.eid_entries == NULL) { + spin_unlock(&dev->eid_table.lock); + return -EINVAL; + } + + max_eid_cnt = min(dev->eid_table.eid_cnt, eid_list->in.max_eid_cnt); + for (i = 0; i < max_eid_cnt; i++) { + e = &dev->eid_table.eid_entries[i]; + if (!e->valid || !net_eq(e->net, current->nsproxy->net_ns)) + continue; + + eid_list->out.eid_list[eid_cnt].eid_index = e->eid_index; + eid_list->out.eid_list[eid_cnt].eid = e->eid; + eid_cnt++; + } + eid_list->out.eid_cnt = eid_cnt; + spin_unlock(&dev->eid_table.lock); + return 0; +} + +static int uburma_cmd_get_eid_list(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_get_eid_list *args; + int ret; + + args = kcalloc(1, sizeof(struct uburma_cmd_get_eid_list), GFP_KERNEL); + if (args == NULL) + return -ENOMEM; + + ret = uburma_tlv_parse(hdr, args); + if (ret != 0) + goto out; + + ret = uburma_fill_eid_list(ubc_dev, args); + if (ret != 0) + goto out; + + ret = uburma_tlv_append(hdr, args); +out: + kfree(args); + return ret; +} + +static int uburma_cmd_user_ctl(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct ubcore_user_ctl k_user_ctl = { 0 }; + struct uburma_cmd_user_ctl arg; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + ret = uburma_fill_user_ctl_info(file->ucontext, &arg, &k_user_ctl); + if (ret != 0) + return ret; + + ret = ubcore_user_control(ubc_dev, &k_user_ctl); + if (ret != 0) + return ret; + + return 0; +} + +static int uburma_cmd_get_net_addr_list(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_get_net_addr_list arg = { 0 }; + struct uburma_cmd_net_addr_info *netaddr_info; + struct ubcore_sip_info *entry; + uint32_t max_netaddr_cnt; + uint32_t netaddr_cnt = 0; + size_t netaddr_size; + uint32_t i; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + max_netaddr_cnt = + min(ubc_dev->sip_table.max_sip_cnt, arg.in.max_netaddr_cnt); + netaddr_size = + max_netaddr_cnt * sizeof(struct
uburma_cmd_net_addr_info); + netaddr_info = kcalloc(1, netaddr_size, GFP_KERNEL); + if (netaddr_info == NULL) + return -ENOMEM; + + mutex_lock(&ubc_dev->sip_table.lock); + if (ubc_dev->sip_table.entry == NULL) { + mutex_unlock(&ubc_dev->sip_table.lock); + ret = -EINVAL; + goto free_list; + } + + for (i = 0; i < max_netaddr_cnt; i++) { + entry = &ubc_dev->sip_table.entry[i].sip_info; + if (entry->is_active) { + netaddr_info[netaddr_cnt].netaddr = entry->addr; + netaddr_info[netaddr_cnt].index = i; + netaddr_cnt++; + } + } + mutex_unlock(&ubc_dev->sip_table.lock); + + arg.out.netaddr_cnt = netaddr_cnt; + arg.out.len = (uint64_t)netaddr_size; + arg.out.addr = (uint64_t)(uintptr_t)netaddr_info; + + ret = uburma_tlv_append(hdr, &arg); + +free_list: + kfree(netaddr_info); + return ret; +} + +static void uburma_fill_tp_cfg(struct uburma_cmd_user_tp_cfg *cmd_tp_cfg, + struct ubcore_tp_cfg *tp_cfg) +{ + /* Attention: ubcore_tp_cfg_flag is different with uburma_cmd_tp_cfg_flag */ + /* so we cannot fill by value */ + tp_cfg->flag.bs.target = cmd_tp_cfg->flag.bs.target; + tp_cfg->flag.bs.loopback = cmd_tp_cfg->flag.bs.loopback; + tp_cfg->flag.bs.dca_enable = cmd_tp_cfg->flag.bs.dca_enable; + tp_cfg->flag.bs.bonding = cmd_tp_cfg->flag.bs.bonding; + + tp_cfg->trans_mode = cmd_tp_cfg->trans_mode; + tp_cfg->retry_num = cmd_tp_cfg->retry_num; + tp_cfg->retry_factor = cmd_tp_cfg->retry_factor; + tp_cfg->ack_timeout = cmd_tp_cfg->ack_timeout; + tp_cfg->dscp = cmd_tp_cfg->dscp; + tp_cfg->oor_cnt = cmd_tp_cfg->oor_cnt; +} + +static void uburma_fill_peer_net_addr(struct uburma_cmd_net_addr *cmd_net_addr, + struct ubcore_net_addr *net_addr) +{ + if (cmd_net_addr->sin_family == AF_INET) { + net_addr->type = UBCORE_NET_ADDR_TYPE_IPV4; + net_addr->net_addr.in4.addr = cmd_net_addr->in4.s_addr; + } else if (cmd_net_addr->sin_family == AF_INET6) { + net_addr->type = UBCORE_NET_ADDR_TYPE_IPV6; + (void)memcpy(net_addr->net_addr.raw, &cmd_net_addr->in6, + sizeof(struct in6_addr)); + } + + net_addr->vlan = cmd_net_addr->vlan; + (void)memcpy(net_addr->mac, cmd_net_addr->mac, + UBCORE_MAC_BYTES * sizeof(uint8_t)); + net_addr->prefix_len = cmd_net_addr->prefix_len; +} + +static void uburma_fill_tp_attr(struct uburma_cmd_tp_attr *cmd_attr, + struct ubcore_tp_attr *attr) +{ + attr->flag.value = cmd_attr->flag.value; + attr->peer_tpn = cmd_attr->peer_tpn; + attr->state = cmd_attr->state; + attr->tx_psn = cmd_attr->tx_psn; + attr->rx_psn = cmd_attr->rx_psn; + attr->mtu = cmd_attr->mtu; + attr->cc_pattern_idx = cmd_attr->cc_pattern_idx; + attr->oos_cnt = cmd_attr->oos_cnt; + attr->local_net_addr_idx = cmd_attr->local_net_addr_idx; + uburma_fill_peer_net_addr(&cmd_attr->peer_net_addr, + &attr->peer_net_addr); + attr->data_udp_start = cmd_attr->data_udp_start; + attr->ack_udp_start = cmd_attr->ack_udp_start; + attr->udp_range = cmd_attr->udp_range; + attr->hop_limit = cmd_attr->hop_limit; + attr->flow_label = cmd_attr->flow_label; + attr->port_id = cmd_attr->port_id; + attr->mn = cmd_attr->mn; + attr->peer_trans_type = cmd_attr->peer_trans_type; +} + +/* Attention: cmd_mask is different with mask, so we cannot fill by value */ +static void uburma_fill_tp_attr_mask(union uburma_cmd_tp_attr_mask *cmd_mask, + union ubcore_tp_attr_mask *mask) +{ + mask->bs.flag = cmd_mask->bs.flag; + mask->bs.peer_tpn = cmd_mask->bs.peer_tpn; + mask->bs.state = cmd_mask->bs.state; + mask->bs.tx_psn = cmd_mask->bs.tx_psn; + mask->bs.rx_psn = cmd_mask->bs.rx_psn; + mask->bs.mtu = cmd_mask->bs.mtu; + mask->bs.cc_pattern_idx = cmd_mask->bs.cc_pattern_idx; 
+ mask->bs.oos_cnt = cmd_mask->bs.oos_cnt; + mask->bs.local_net_addr_idx = cmd_mask->bs.local_net_addr_idx; + mask->bs.peer_net_addr = cmd_mask->bs.peer_net_addr; + mask->bs.data_udp_start = cmd_mask->bs.data_udp_start; + mask->bs.ack_udp_start = cmd_mask->bs.ack_udp_start; + mask->bs.udp_range = cmd_mask->bs.udp_range; + mask->bs.hop_limit = cmd_mask->bs.hop_limit; + mask->bs.flow_label = cmd_mask->bs.flow_label; + mask->bs.port_id = cmd_mask->bs.port_id; + mask->bs.mn = cmd_mask->bs.mn; + mask->bs.peer_trans_type = cmd_mask->bs.peer_trans_type; +} + +static int uburma_modify_user_tp(struct ubcore_device *ubc_dev, + struct uburma_cmd_modify_tp *arg) +{ + union ubcore_tp_attr_mask mask = { 0 }; + struct ubcore_tp_cfg tp_cfg = { 0 }; + struct ubcore_tp_attr attr = { 0 }; + int ret; + + if (ubc_dev == NULL || ubc_dev->ops == NULL || + ubc_dev->ops->modify_user_tp == NULL) { + uburma_log_err("Invalid parameter.\n"); + return -1; + } + + uburma_fill_tp_cfg(&arg->in.tp_cfg, &tp_cfg); + uburma_fill_tp_attr(&arg->in.attr, &attr); + uburma_fill_tp_attr_mask(&arg->in.mask, &mask); + + ret = ubc_dev->ops->modify_user_tp(ubc_dev, arg->in.tpn, &tp_cfg, &attr, + mask); + if (ret != 0) + uburma_log_err("Failed to modify user tp, ret: %d.\n", ret); + + return ret; +} + +static int uburma_cmd_modify_tp(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_modify_tp *arg; + int ret; + + arg = kcalloc(1, sizeof(struct uburma_cmd_modify_tp), GFP_KERNEL); + if (arg == NULL) + return -ENOMEM; + + ret = uburma_tlv_parse(hdr, arg); + if (ret != 0) + goto out; + + ret = uburma_modify_user_tp(ubc_dev, arg); + if (ret != 0) + uburma_log_err("Failed to modify user tp, tpn: %u.\n", + arg->in.tpn); +out: + kfree(arg); + return ret; +} + +static void uburma_fill_device_attr(struct ubcore_device *ubc_dev, + struct uburma_cmd_device_attr *attr) +{ + uint8_t port_cnt, i; + + attr->guid = ubc_dev->attr.guid; + attr->dev_cap.feature.value = ubc_dev->attr.dev_cap.feature.value; + attr->dev_cap.max_jfc = ubc_dev->attr.dev_cap.max_jfc; + attr->dev_cap.max_jfs = ubc_dev->attr.dev_cap.max_jfs; + attr->dev_cap.max_jfr = ubc_dev->attr.dev_cap.max_jfr; + attr->dev_cap.max_jetty = ubc_dev->attr.dev_cap.max_jetty; + attr->dev_cap.max_jetty_grp = ubc_dev->attr.dev_cap.max_jetty_grp; + attr->dev_cap.max_jetty_in_jetty_grp = + ubc_dev->attr.dev_cap.max_jetty_in_jetty_grp; + attr->dev_cap.max_jfc_depth = ubc_dev->attr.dev_cap.max_jfc_depth; + attr->dev_cap.max_jfs_depth = ubc_dev->attr.dev_cap.max_jfs_depth; + attr->dev_cap.max_jfr_depth = ubc_dev->attr.dev_cap.max_jfr_depth; + attr->dev_cap.max_jfs_inline_len = + ubc_dev->attr.dev_cap.max_jfs_inline_size; + attr->dev_cap.max_jfs_sge = ubc_dev->attr.dev_cap.max_jfs_sge; + attr->dev_cap.max_jfs_rsge = ubc_dev->attr.dev_cap.max_jfs_rsge; + attr->dev_cap.max_jfr_sge = ubc_dev->attr.dev_cap.max_jfr_sge; + attr->dev_cap.max_msg_size = ubc_dev->attr.dev_cap.max_msg_size; + attr->dev_cap.max_read_size = ubc_dev->attr.dev_cap.max_read_size; + attr->dev_cap.max_write_size = ubc_dev->attr.dev_cap.max_write_size; + attr->dev_cap.max_cas_size = ubc_dev->attr.dev_cap.max_cas_size; + attr->dev_cap.max_swap_size = ubc_dev->attr.dev_cap.max_swap_size; + attr->dev_cap.max_fetch_and_add_size = + ubc_dev->attr.dev_cap.max_fetch_and_add_size; + attr->dev_cap.max_fetch_and_sub_size = + ubc_dev->attr.dev_cap.max_fetch_and_sub_size; + attr->dev_cap.max_fetch_and_and_size = + ubc_dev->attr.dev_cap.max_fetch_and_and_size; + 
attr->dev_cap.max_fetch_and_or_size = + ubc_dev->attr.dev_cap.max_fetch_and_or_size; + attr->dev_cap.max_fetch_and_xor_size = + ubc_dev->attr.dev_cap.max_fetch_and_xor_size; + attr->dev_cap.atomic_feat.value = + ubc_dev->attr.dev_cap.atomic_feat.value; + attr->dev_cap.trans_mode = ubc_dev->attr.dev_cap.trans_mode; + attr->dev_cap.sub_trans_mode_cap = + ubc_dev->attr.dev_cap.sub_trans_mode_cap; + attr->dev_cap.congestion_ctrl_alg = + ubc_dev->attr.dev_cap.congestion_ctrl_alg; + attr->dev_cap.ceq_cnt = ubc_dev->attr.dev_cap.ceq_cnt; + attr->dev_cap.max_tp_in_tpg = ubc_dev->attr.dev_cap.max_tp_in_tpg; + attr->dev_cap.max_eid_cnt = ubc_dev->attr.dev_cap.max_eid_cnt; + attr->dev_cap.page_size_cap = ubc_dev->attr.dev_cap.page_size_cap; + attr->dev_cap.max_oor_cnt = ubc_dev->attr.dev_cap.max_oor_cnt; + attr->dev_cap.mn = ubc_dev->attr.dev_cap.mn; + attr->dev_cap.max_netaddr_cnt = ubc_dev->attr.dev_cap.max_netaddr_cnt; + + attr->port_cnt = ubc_dev->attr.port_cnt; + port_cnt = (attr->port_cnt < UBURMA_CMD_MAX_PORT_CNT) ? + attr->port_cnt : + UBURMA_CMD_MAX_PORT_CNT; + for (i = 0; i < port_cnt; i++) + attr->port_attr[i].max_mtu = ubc_dev->attr.port_attr[i].max_mtu; + + attr->reserved_jetty_id_min = ubc_dev->attr.reserved_jetty_id_min; + attr->reserved_jetty_id_max = ubc_dev->attr.reserved_jetty_id_max; +} + +static int uburma_fill_device_status(struct ubcore_device *ubc_dev, + struct uburma_cmd_device_attr *attr) +{ + struct ubcore_device_status status; + uint8_t port_cnt, i; + int ret; + + ret = ubcore_query_device_status(ubc_dev, &status); + if (ret != 0) { + uburma_log_err("Failed to query device status.\n"); + return ret; + } + + port_cnt = (attr->port_cnt < UBURMA_CMD_MAX_PORT_CNT) ? + attr->port_cnt : + UBURMA_CMD_MAX_PORT_CNT; + for (i = 0; i < port_cnt; i++) { + attr->port_attr[i].state = status.port_status[i].state; + attr->port_attr[i].active_width = + status.port_status[i].active_width; + attr->port_attr[i].active_speed = + status.port_status[i].active_speed; + attr->port_attr[i].active_mtu = + status.port_status[i].active_mtu; + } + + return 0; +} + +static int uburma_cmd_query_device_attr(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_query_device_attr arg = { 0 }; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return -1; + + if (strcmp(arg.in.dev_name, ubc_dev->dev_name) != 0) { + uburma_log_err("Invalid parameter with error dev_name.\n"); + return -1; + } + uburma_fill_device_attr(ubc_dev, &arg.out.attr); + ret = uburma_fill_device_status(ubc_dev, &arg.out.attr); + if (ret != 0) + return ret; + + return uburma_tlv_append(hdr, &arg); +} + +struct uburma_import_jetty_async_user_arg { + struct uburma_notifier_uobj *notifier; + struct uburma_notify_event *notify; + struct uburma_uobj *uobj_tjetty; +}; + +static void uburma_free_notify_event(uint64_t event_data) +{ + kfree((struct uburma_notify_event *)event_data); +} + +static void uburma_cmd_import_jetty_async_callback(struct ubcore_tjetty *tjetty, + int status, void *user_arg) +{ + struct uburma_import_jetty_async_user_arg *arg = user_arg; + struct uburma_notifier_uobj *notifier = arg->notifier; + struct uburma_notify_event *notify_event = arg->notify; + struct uburma_notify *notify = ¬ify_event->notify; + struct uburma_uobj *uobj = arg->uobj_tjetty; + + notify->status = status; + if (status == 0) { + uobj->object = tjetty; + uobj_alloc_commit(uobj); + if (tjetty->vtpn != NULL) + notify->vtpn = tjetty->vtpn->vtpn; + } else + uobj_alloc_abort(uobj); + + 
notify->status = status; + if (tjetty->vtpn != NULL) + notify->vtpn = tjetty->vtpn->vtpn; + uburma_write_event_with_free_fn(¬ifier->jfe, + (uint64_t)(uintptr_t)notify_event, 0, + NULL, NULL, uburma_free_notify_event); + uobj_put(¬ifier->uobj); + kfree(user_arg); +} + +static struct ubcore_import_cb * +uburma_create_import_callback(struct uburma_cmd_import_jetty_async *arg, + struct uburma_file *file, + struct uburma_uobj *uobj_tjetty) +{ + struct uburma_import_jetty_async_user_arg *user_arg; + struct uburma_notifier_uobj *notifier; + struct uburma_notify_event *notify; + struct ubcore_import_cb *cb; + + notifier = uburma_get_notifier_uobj(arg->in.fd, file); + if (IS_ERR_OR_NULL(notifier)) { + uburma_log_err("failed to find notifier.\n"); + return NULL; + } + + notify = kzalloc(sizeof(struct uburma_notify_event), GFP_KERNEL); + if (IS_ERR_OR_NULL(notify)) { + uburma_log_err("failed to alloc notify.\n"); + goto put_notifier; + } + notify->notify.type = UBURMA_IMPORT_JETTY_NOTIFY; + notify->notify.user_ctx = arg->in.user_ctx; + notify->notify.urma_jetty = arg->in.urma_tjetty; + notify->tjetty_handle = uobj_tjetty->id; + + user_arg = kzalloc(sizeof(struct uburma_import_jetty_async_user_arg), + GFP_KERNEL); + if (IS_ERR_OR_NULL(user_arg)) { + uburma_log_err("failed to alloc user arg.\n"); + goto free_notify; + } + user_arg->notifier = notifier; + user_arg->notify = notify; + user_arg->uobj_tjetty = uobj_tjetty; + + cb = kzalloc(sizeof(struct ubcore_import_cb), GFP_KERNEL); + if (IS_ERR_OR_NULL(cb)) { + uburma_log_err("failed to alloc import callback.\n"); + goto free_user_arg; + } + atomic_inc(&user_arg->notifier->incomplete_cnt); + cb->callback = uburma_cmd_import_jetty_async_callback; + cb->user_arg = user_arg; + return cb; + +free_user_arg: + kfree(user_arg); +free_notify: + kfree(notify); +put_notifier: + uobj_put(¬ifier->uobj); + return NULL; +} + +static void uburma_delete_import_callback(struct ubcore_import_cb *cb) +{ + struct uburma_import_jetty_async_user_arg *user_arg; + + if (cb == NULL) + return; + user_arg = cb->user_arg; + atomic_dec(&user_arg->notifier->incomplete_cnt); + uobj_put(&user_arg->notifier->uobj); + kfree(user_arg->notify); + kfree(user_arg); + kfree(cb); +} + +static int uburma_cmd_import_jetty_async(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_import_jetty_async arg; + struct ubcore_tjetty_cfg cfg = { 0 }; + struct ubcore_tjetty *tjetty; + struct ubcore_udata udata; + struct uburma_uobj *uobj; + struct ubcore_import_cb *cb; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + uobj = uobj_alloc(UOBJ_CLASS_TARGET_JETTY, file); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("UOBJ_CLASS_TARGET_JETTY alloc fail!\n"); + return -ENOMEM; + } + + (void)memcpy(cfg.id.eid.raw, arg.in.eid, UBCORE_EID_SIZE); + cfg.id.id = arg.in.id; + cfg.token_value.token = arg.in.token; + cfg.trans_mode = (enum ubcore_transport_mode)arg.in.trans_mode; + cfg.flag.value = arg.in.flag; + cfg.policy = (enum ubcore_jetty_grp_policy)arg.in.policy; + cfg.type = (enum ubcore_target_type)arg.in.type; + cfg.eid_index = file->ucontext->eid_index; + fill_udata(&udata, file->ucontext, &arg.udata); + + cb = uburma_create_import_callback(&arg, file, uobj); + if (IS_ERR_OR_NULL(cb)) { + uburma_log_err("failed to find notifier.\n"); + uobj_alloc_abort(uobj); + return -ENOMEM; + } + + tjetty = ubcore_import_jetty_async(ubc_dev, &cfg, arg.in.timeout, cb, + &udata); + if (IS_ERR_OR_NULL(tjetty)) { + 
uburma_log_err("ubcore_import_jetty_async failed.\n"); + uburma_delete_import_callback(cb); + uobj_alloc_abort(uobj); + return PTR_ERR(tjetty); + } + + arg.out.handle = (uint64_t)uobj->id; + ret = uburma_tlv_append(hdr, &arg); + if (ret != 0) { + ubcore_unimport_jetty_async(tjetty, 0, NULL); + uburma_delete_import_callback(cb); + uobj_alloc_abort(uobj); + return ret; + } + return 0; +} + +static int uburma_cmd_unimport_jetty_async(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unimport_jetty_async arg; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + return uburma_unimport_jetty(file, true, arg.in.handle); +} + +struct uburma_bind_jetty_async_user_arg { + struct uburma_notifier_uobj *notifier; + struct uburma_notify_event *notify; +}; + +static void uburma_cmd_bind_jetty_async_callback(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + int status, void *user_arg) +{ + struct uburma_bind_jetty_async_user_arg *arg = user_arg; + struct uburma_notifier_uobj *notifier = arg->notifier; + struct uburma_notify_event *notify_event = arg->notify; + struct uburma_notify *notify = ¬ify_event->notify; + + notify->status = status; + if (status == 0 && tjetty->vtpn != NULL) + notify->vtpn = tjetty->vtpn->vtpn; + uburma_write_event_with_free_fn(¬ifier->jfe, + (uint64_t)(uintptr_t)notify_event, 0, + NULL, NULL, uburma_free_notify_event); + kfree(user_arg); +} + +static struct ubcore_bind_cb *uburma_create_bind_callback( + struct uburma_cmd_bind_jetty_async *arg, struct uburma_file *file, + struct uburma_uobj *uobj_tjetty, struct uburma_uobj *uobj_jetty) +{ + struct uburma_notifier_uobj *notifier; + struct uburma_notify_event *notify; + struct uburma_bind_jetty_async_user_arg *user_arg; + struct ubcore_bind_cb *cb; + + notifier = uburma_get_notifier_uobj(arg->in.fd, file); + if (IS_ERR_OR_NULL(notifier)) { + uburma_log_err("failed to find notifier.\n"); + return NULL; + } + + notify = kzalloc(sizeof(struct uburma_notify_event), GFP_KERNEL); + if (IS_ERR_OR_NULL(notify)) { + uburma_log_err("failed to alloc notify.\n"); + goto put_notifier; + } + notify->notify.type = UBURMA_BIND_JETTY_NOTIFY; + notify->notify.user_ctx = arg->in.user_ctx; + notify->notify.urma_jetty = arg->in.urma_jetty; + notify->tjetty_handle = uobj_tjetty->id; + notify->jetty_handle = uobj_jetty->id; + + user_arg = kzalloc(sizeof(struct uburma_bind_jetty_async_user_arg), + GFP_KERNEL); + if (IS_ERR_OR_NULL(user_arg)) { + uburma_log_err("failed to alloc user arg.\n"); + goto free_notify; + } + user_arg->notifier = notifier; + user_arg->notify = notify; + + cb = kzalloc(sizeof(struct ubcore_bind_cb), GFP_KERNEL); + if (IS_ERR_OR_NULL(cb)) { + uburma_log_err("failed to alloc bind callback.\n"); + goto free_user_arg; + } + atomic_inc(&user_arg->notifier->incomplete_cnt); + cb->callback = uburma_cmd_bind_jetty_async_callback; + cb->user_arg = user_arg; + return cb; + +free_user_arg: + kfree(user_arg); +free_notify: + kfree(notify); +put_notifier: + uobj_put(¬ifier->uobj); + return NULL; +} + +static void uburma_delete_bind_callback(struct ubcore_bind_cb *cb) +{ + struct uburma_bind_jetty_async_user_arg *user_arg; + + if (cb == NULL) + return; + user_arg = cb->user_arg; + atomic_dec(&user_arg->notifier->incomplete_cnt); + uobj_put(&user_arg->notifier->uobj); + kfree(user_arg->notify); + kfree(user_arg); + kfree(cb); +} + +static int uburma_cmd_bind_jetty_async(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct 
uburma_cmd_hdr *hdr) +{ + struct uburma_tjetty_uobj *uburma_tjetty; + struct uburma_cmd_bind_jetty_async arg; + struct uburma_uobj *tjetty_uobj; + struct uburma_uobj *jetty_uobj; + struct ubcore_tjetty *tjetty; + struct ubcore_udata udata; + struct ubcore_bind_cb *cb; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + if (uburma_get_jetty_tjetty_objs(file, arg.in.jetty_handle, + arg.in.tjetty_handle, &jetty_uobj, + &tjetty_uobj)) + return -EINVAL; + fill_udata(&udata, file->ucontext, &arg.udata); + + cb = uburma_create_bind_callback(&arg, file, jetty_uobj, tjetty_uobj); + if (IS_ERR_OR_NULL(cb)) { + uburma_log_err("failed to find notifier.\n"); + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return -ENOMEM; + } + + tjetty = (struct ubcore_tjetty *)tjetty_uobj->object; + ret = ubcore_bind_jetty_async(jetty_uobj->object, tjetty, + arg.in.timeout, cb, &udata); + if (ret != 0) { + uburma_log_err("bind jetty async failed.\n"); + uburma_delete_bind_callback(cb); + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; + } + + uburma_tjetty = (struct uburma_tjetty_uobj *)(tjetty_uobj); + uburma_tjetty->jetty_uobj = (struct uburma_jetty_uobj *)(jetty_uobj); + ret = uburma_tlv_append(hdr, &arg); + if (ret != 0) { + ubcore_unbind_jetty_async(jetty_uobj->object, 0, NULL); + uburma_delete_bind_callback(cb); + } + + uburma_put_jetty_tjetty_objs(jetty_uobj, tjetty_uobj); + return ret; +} + +static int uburma_cmd_unbind_jetty_async(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_unadvise_jetty arg; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + return uburma_unbind_jetty(file, true, arg.in.jetty_handle, + arg.in.tjetty_handle); +} + +static int uburma_cmd_create_notifier(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_create_notifier arg; + struct uburma_notifier_uobj *notifier; + struct uburma_uobj *uobj; + + uobj = uobj_alloc(UOBJ_CLASS_NOTIFIER, file); + if (IS_ERR_OR_NULL(uobj)) + return PTR_ERR(uobj); + + notifier = container_of(uobj, struct uburma_notifier_uobj, uobj); + uburma_init_jfe(¬ifier->jfe); + atomic_set(¬ifier->incomplete_cnt, 0); + + arg.out.fd = uobj->id; /* should get fd before commit uobj */ + uobj_alloc_commit(uobj); + + return uburma_tlv_append(hdr, (void *)&arg); +} + +static int uburma_cmd_get_tp_list(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_get_tp_list *arg = NULL; + struct ubcore_tp_info *tp_list = NULL; + struct ubcore_get_tp_cfg cfg = { 0 }; + struct ubcore_udata udata = { 0 }; + uint32_t tp_cnt = 0; + int ret; + + arg = kcalloc(1, sizeof(struct uburma_cmd_get_tp_list), GFP_KERNEL); + if (IS_ERR_OR_NULL(arg)) + return -ENOMEM; + ret = uburma_tlv_parse(hdr, arg); + if (ret != 0) + goto free_arg; + + if (arg->in.tp_cnt > UBURMA_CMD_MAX_TP_NUM || arg->in.tp_cnt == 0) { + uburma_log_err("Invalid tp_cnt: %u.\n", arg->in.tp_cnt); + ret = -EINVAL; + goto free_arg; + } + + cfg.flag.value = arg->in.flag; + cfg.trans_mode = arg->in.trans_mode; + (void)memcpy(&cfg.local_eid, arg->in.local_eid, + sizeof(union ubcore_eid)); + (void)memcpy(&cfg.peer_eid, arg->in.peer_eid, sizeof(union ubcore_eid)); + tp_cnt = arg->in.tp_cnt; + fill_udata(&udata, file->ucontext, &arg->udata); + + tp_list = kcalloc(tp_cnt, sizeof(struct ubcore_tp_info), GFP_KERNEL); + if (IS_ERR_OR_NULL(tp_list)) { + ret = -ENOMEM; + 
goto free_arg; + } + ret = ubcore_get_tp_list(ubc_dev, &cfg, &tp_cnt, tp_list, &udata); + if (ret != 0) { + uburma_log_err("Failed to get tp list, ret: %d.\n", ret); + goto free_tp_list; + } + arg->out.tp_cnt = tp_cnt; + (void)memcpy(arg->out.tp_handle, tp_list, + tp_cnt * sizeof(struct ubcore_tp_info)); + + ret = uburma_tlv_append(hdr, arg); +free_tp_list: + kfree(tp_list); +free_arg: + kfree(arg); + return ret; +} + +static int uburma_cmd_set_tp_attr(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct ubcore_tp_attr_value tp_attr = { 0 }; + struct uburma_cmd_set_tp_attr arg = { 0 }; + struct ubcore_udata udata = { 0 }; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + if (sizeof(arg.in.tp_attr) != sizeof(struct ubcore_tp_attr_value)) { + uburma_log_err("Invalid parameter.\n"); + return -EINVAL; + } + fill_udata(&udata, file->ucontext, &arg.udata); + (void)memcpy(&tp_attr, arg.in.tp_attr, sizeof(arg.in.tp_attr)); + + ret = ubcore_set_tp_attr(ubc_dev, arg.in.tp_handle, arg.in.tp_attr_cnt, + arg.in.tp_attr_bitmap, &tp_attr, &udata); + if (ret != 0) + uburma_log_err( + "Failed to set tp attribute values, ret: %d.\n", ret); + + return ret; +} + +static int uburma_cmd_get_tp_attr(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct ubcore_tp_attr_value tp_attr = { 0 }; + struct uburma_cmd_get_tp_attr arg = { 0 }; + struct ubcore_udata udata = { 0 }; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + if (sizeof(arg.out.tp_attr) != sizeof(struct ubcore_tp_attr_value)) { + uburma_log_err("Invalid parameter.\n"); + return -EINVAL; + } + fill_udata(&udata, file->ucontext, &arg.udata); + + ret = ubcore_get_tp_attr(ubc_dev, arg.in.tp_handle, + &arg.out.tp_attr_cnt, &arg.out.tp_attr_bitmap, + &tp_attr, &udata); + if (ret != 0) { + uburma_log_err( + "Failed to get tp attribute values, ret: %d.\n", ret); + return ret; + } + (void)memcpy(arg.out.tp_attr, &tp_attr, + sizeof(struct ubcore_tp_attr_value)); + + return uburma_tlv_append(hdr, &arg); +} + +static int uburma_cmd_exchange_tp_info(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_exchange_tp_info arg; + struct ubcore_get_tp_cfg get_tp_cfg = {0}; + struct ubcore_udata udata = {0}; + uint64_t peer_tp_handle; + uint32_t rx_psn; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + get_tp_cfg = arg.in.get_tp_cfg; + ret = ubcore_exchange_tp_info(ubc_dev, &get_tp_cfg, arg.in.tp_handle, + arg.in.tx_psn, &peer_tp_handle, &rx_psn, &udata); + if (ret != 0) { + uburma_log_err("Failed to exchange tp info, ret: %d.\n", ret); + return ret; + } + arg.out.peer_tp_handle = peer_tp_handle; + arg.out.rx_psn = rx_psn; + + ret = uburma_tlv_append(hdr, &arg); + return ret; +} + +static int uburma_cmd_import_jetty_ex(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + struct ubcore_active_tp_cfg active_tp_cfg = { 0 }; + struct uburma_cmd_import_jetty_ex arg = { 0 }; + struct ubcore_tjetty_cfg cfg = { 0 }; + struct ubcore_udata udata = { 0 }; + struct ubcore_tjetty *tjetty; + struct uburma_uobj *uobj; + int ret; + + ret = uburma_tlv_parse(hdr, &arg); + if (ret != 0) + return ret; + + uobj = uobj_alloc(UOBJ_CLASS_TARGET_JETTY, file); + if (IS_ERR(uobj)) { + uburma_log_err("Failed to alloc uobj, class_id: %d.\n", + UOBJ_CLASS_TARGET_JETTY); + return -ENOMEM; + } + +
(void)memcpy(cfg.id.eid.raw, arg.in.eid, UBCORE_EID_SIZE); + cfg.id.id = arg.in.id; + cfg.token_value.token = arg.in.token; + cfg.trans_mode = (enum ubcore_transport_mode)arg.in.trans_mode; + cfg.flag.value = arg.in.flag; + cfg.policy = (enum ubcore_jetty_grp_policy)arg.in.policy; + cfg.type = (enum ubcore_target_type)arg.in.type; + cfg.eid_index = file->ucontext->eid_index; + + active_tp_cfg.tp_handle.value = arg.in.tp_handle; + active_tp_cfg.peer_tp_handle.value = arg.in.peer_tp_handle; + active_tp_cfg.tag = arg.in.tag; + active_tp_cfg.tp_attr.tx_psn = arg.in.tx_psn; + active_tp_cfg.tp_attr.rx_psn = arg.in.rx_psn; + fill_udata(&udata, file->ucontext, &arg.udata); + + tjetty = ubcore_import_jetty_ex(ubc_dev, &cfg, &active_tp_cfg, &udata); + if (IS_ERR_OR_NULL(tjetty)) { + uburma_log_err("ubcore_import_jetty failed.\n"); + uobj_alloc_abort(uobj); + return PTR_ERR(tjetty); + } + + uobj->object = tjetty; + arg.out.handle = (uint64_t)uobj->id; + if (tjetty->vtpn != NULL) + arg.out.tpn = tjetty->vtpn->vtpn; + else if (tjetty->tp != NULL) + arg.out.tpn = tjetty->tp->tpn; + else + arg.out.tpn = UBURMA_INVALID_TPN; + + ret = uburma_tlv_append(hdr, &arg); + if (ret != 0) { + (void)ubcore_unimport_jetty(tjetty); + uobj_alloc_abort(uobj); + return ret; + } + uobj_alloc_commit(uobj); + return 0; +} + +typedef int (*uburma_cmd_handler)(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr); + +static uburma_cmd_handler g_uburma_cmd_handlers[] = { + [0] = NULL, + [UBURMA_CMD_CREATE_CTX] = uburma_cmd_create_ctx, + [UBURMA_CMD_ALLOC_TOKEN_ID] = uburma_cmd_alloc_token_id, + [UBURMA_CMD_FREE_TOKEN_ID] = uburma_cmd_free_token_id, + [UBURMA_CMD_REGISTER_SEG] = uburma_cmd_register_seg, + [UBURMA_CMD_UNREGISTER_SEG] = uburma_cmd_unregister_seg, + [UBURMA_CMD_IMPORT_SEG] = uburma_cmd_import_seg, + [UBURMA_CMD_UNIMPORT_SEG] = uburma_cmd_unimport_seg, + [UBURMA_CMD_CREATE_JFR] = uburma_cmd_create_jfr, + [UBURMA_CMD_MODIFY_JFR] = uburma_cmd_modify_jfr, + [UBURMA_CMD_QUERY_JFR] = uburma_cmd_query_jfr, + [UBURMA_CMD_DELETE_JFR] = uburma_cmd_delete_jfr, + [UBURMA_CMD_CREATE_JFS] = uburma_cmd_create_jfs, + [UBURMA_CMD_MODIFY_JFS] = uburma_cmd_modify_jfs, + [UBURMA_CMD_QUERY_JFS] = uburma_cmd_query_jfs, + [UBURMA_CMD_DELETE_JFS] = uburma_cmd_delete_jfs, + [UBURMA_CMD_CREATE_JFC] = uburma_cmd_create_jfc, + [UBURMA_CMD_MODIFY_JFC] = uburma_cmd_modify_jfc, + [UBURMA_CMD_DELETE_JFC] = uburma_cmd_delete_jfc, + [UBURMA_CMD_CREATE_JFCE] = uburma_cmd_create_jfce, + [UBURMA_CMD_IMPORT_JFR] = uburma_cmd_import_jfr, + [UBURMA_CMD_UNIMPORT_JFR] = uburma_cmd_unimport_jfr, + [UBURMA_CMD_CREATE_JETTY] = uburma_cmd_create_jetty, + [UBURMA_CMD_MODIFY_JETTY] = uburma_cmd_modify_jetty, + [UBURMA_CMD_QUERY_JETTY] = uburma_cmd_query_jetty, + [UBURMA_CMD_DELETE_JETTY] = uburma_cmd_delete_jetty, + [UBURMA_CMD_IMPORT_JETTY] = uburma_cmd_import_jetty, + [UBURMA_CMD_UNIMPORT_JETTY] = uburma_cmd_unimport_jetty, + [UBURMA_CMD_ADVISE_JFR] = uburma_cmd_advise_jfr, + [UBURMA_CMD_UNADVISE_JFR] = uburma_cmd_unadvise_jfr, + [UBURMA_CMD_ADVISE_JETTY] = uburma_cmd_advise_jetty, + [UBURMA_CMD_UNADVISE_JETTY] = uburma_cmd_unadvise_jetty, + [UBURMA_CMD_BIND_JETTY] = uburma_cmd_bind_jetty, + [UBURMA_CMD_UNBIND_JETTY] = uburma_cmd_unbind_jetty, + [UBURMA_CMD_CREATE_JETTY_GRP] = uburma_cmd_create_jetty_grp, + [UBURMA_CMD_DESTROY_JETTY_GRP] = uburma_cmd_delete_jetty_grp, + [UBURMA_CMD_USER_CTL] = uburma_cmd_user_ctl, + [UBURMA_CMD_GET_EID_LIST] = uburma_cmd_get_eid_list, + [UBURMA_CMD_GET_NETADDR_LIST] = 
uburma_cmd_get_net_addr_list, + [UBURMA_CMD_MODIFY_TP] = uburma_cmd_modify_tp, + [UBURMA_CMD_QUERY_DEV_ATTR] = uburma_cmd_query_device_attr, + [UBURMA_CMD_IMPORT_JETTY_ASYNC] = uburma_cmd_import_jetty_async, + [UBURMA_CMD_UNIMPORT_JETTY_ASYNC] = uburma_cmd_unimport_jetty_async, + [UBURMA_CMD_BIND_JETTY_ASYNC] = uburma_cmd_bind_jetty_async, + [UBURMA_CMD_UNBIND_JETTY_ASYNC] = uburma_cmd_unbind_jetty_async, + [UBURMA_CMD_CREATE_NOTIFIER] = uburma_cmd_create_notifier, + [UBURMA_CMD_GET_TP_LIST] = uburma_cmd_get_tp_list, + [UBURMA_CMD_IMPORT_JETTY_EX] = uburma_cmd_import_jetty_ex, + [UBURMA_CMD_IMPORT_JFR_EX] = uburma_cmd_import_jfr_ex, + [UBURMA_CMD_BIND_JETTY_EX] = uburma_cmd_bind_jetty_ex, + [UBURMA_CMD_DELETE_JFS_BATCH] = uburma_cmd_delete_jfs_batch, + [UBURMA_CMD_DELETE_JFR_BATCH] = uburma_cmd_delete_jfr_batch, + [UBURMA_CMD_DELETE_JFC_BATCH] = uburma_cmd_delete_jfc_batch, + [UBURMA_CMD_DELETE_JETTY_BATCH] = uburma_cmd_delete_jetty_batch, + [UBURMA_CMD_SET_TP_ATTR] = uburma_cmd_set_tp_attr, + [UBURMA_CMD_GET_TP_ATTR] = uburma_cmd_get_tp_attr, + [UBURMA_CMD_EXCHANGE_TP_INFO] = uburma_cmd_exchange_tp_info, +}; + +static int uburma_cmd_parse(struct ubcore_device *ubc_dev, + struct uburma_file *file, + struct uburma_cmd_hdr *hdr) +{ + if (hdr->command < UBURMA_CMD_CREATE_CTX || + hdr->command >= UBURMA_CMD_MAX || + g_uburma_cmd_handlers[hdr->command] == NULL) { + uburma_log_err("bad uburma command: %d.\n", (int)hdr->command); + return -EINVAL; + } + return g_uburma_cmd_handlers[hdr->command](ubc_dev, file, hdr); +} + +static inline bool is_cmd_ucontext_free(struct uburma_cmd_hdr *hdr) +{ + return (hdr->command == UBURMA_CMD_CREATE_CTX || + hdr->command == UBURMA_CMD_GET_EID_LIST || + hdr->command == UBURMA_CMD_QUERY_DEV_ATTR); +} + +long uburma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct uburma_cmd_hdr *user_hdr = (struct uburma_cmd_hdr *)arg; + struct uburma_device *ubu_dev; + struct ubcore_device *ubc_dev; + struct uburma_cmd_hdr hdr; + struct uburma_file *file; + int srcu_idx; + long ret; + + if (filp == NULL || filp->private_data == NULL) { + uburma_log_err("invalid param"); + return -EINVAL; + } + file = filp->private_data; + ubu_dev = file->ubu_dev; + if (ubu_dev == NULL) { + uburma_log_err("invalid param"); + return -EINVAL; + } + uburma_cmd_inc(ubu_dev); + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); + ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); + if (!ubc_dev) { + uburma_log_err("can not find ubcore device.\n"); + ret = -EIO; + goto srcu_unlock; + } + + if (cmd == UBURMA_CMD) { + ret = (long)copy_from_user(&hdr, user_hdr, + sizeof(struct uburma_cmd_hdr)); + if ((ret != 0) || (hdr.args_len > UBURMA_CMD_MAX_ARGS_SIZE) || + (hdr.args_len == 0 || hdr.args_addr == 0)) { + uburma_log_err( + "invalid input, hdr.command: %d, ret:%ld, hdr.args_len: %d\n", + hdr.command, ret, hdr.args_len); + ret = -EINVAL; + } else { + if (!is_cmd_ucontext_free(&hdr)) { + /* Check ucontext */ + down_read(&file->ucontext_rwsem); + if (file->ucontext == NULL) + ret = -EINVAL; + else + ret = (long)uburma_cmd_parse( + ubc_dev, file, &hdr); + up_read(&file->ucontext_rwsem); + } else { + ret = (long)uburma_cmd_parse(ubc_dev, file, + &hdr); + } + } + } else { + uburma_log_err("bad ioctl command.\n"); + ret = -ENOIOCTLCMD; + } + +srcu_unlock: + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + uburma_cmd_dec(ubu_dev); + return ret; +} diff --git a/drivers/ub/urma/uburma/uburma_cmd.h b/drivers/ub/urma/uburma/uburma_cmd.h new file mode 100644 index 
000000000000..d0d6ed209156 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_cmd.h @@ -0,0 +1,1086 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uburma cmd header file + * Author: Qian Guoxin + * Create: 2023-2-28 + * Note: + * History: 2023-2-28: Create file + */ + +#ifndef UBURMA_CMD_H +#define UBURMA_CMD_H + +#include +#include +#include +#include +#include + +#include +#include "uburma_types.h" + +struct uburma_cmd_hdr { + uint32_t command; + uint32_t args_len; + uint64_t args_addr; +}; + +#define UBURMA_CMD_MAX_ARGS_SIZE 25600 +#define UBURMA_CMD_MAX_PORT_CNT 8 +#define UBURMA_CMD_TP_ATTR_BYTES 128 + +/* only for uburma device ioctl */ +#define UBURMA_CMD_MAGIC 'U' +#define UBURMA_CMD _IOWR(UBURMA_CMD_MAGIC, 1, struct uburma_cmd_hdr) + +enum uburma_cmd { + UBURMA_CMD_CREATE_CTX = 1, + UBURMA_CMD_ALLOC_TOKEN_ID, + UBURMA_CMD_FREE_TOKEN_ID, + UBURMA_CMD_REGISTER_SEG, + UBURMA_CMD_UNREGISTER_SEG, + UBURMA_CMD_IMPORT_SEG, + UBURMA_CMD_UNIMPORT_SEG, + UBURMA_CMD_CREATE_JFS, + UBURMA_CMD_MODIFY_JFS, + UBURMA_CMD_QUERY_JFS, + UBURMA_CMD_DELETE_JFS, + UBURMA_CMD_CREATE_JFR, + UBURMA_CMD_MODIFY_JFR, + UBURMA_CMD_QUERY_JFR, + UBURMA_CMD_DELETE_JFR, + UBURMA_CMD_CREATE_JFC, + UBURMA_CMD_MODIFY_JFC, + UBURMA_CMD_DELETE_JFC, + UBURMA_CMD_CREATE_JFCE, + UBURMA_CMD_IMPORT_JFR, + UBURMA_CMD_UNIMPORT_JFR, + UBURMA_CMD_CREATE_JETTY, + UBURMA_CMD_MODIFY_JETTY, + UBURMA_CMD_QUERY_JETTY, + UBURMA_CMD_DELETE_JETTY, + UBURMA_CMD_IMPORT_JETTY, + UBURMA_CMD_UNIMPORT_JETTY, + UBURMA_CMD_ADVISE_JFR, + UBURMA_CMD_UNADVISE_JFR, + UBURMA_CMD_ADVISE_JETTY, + UBURMA_CMD_UNADVISE_JETTY, + UBURMA_CMD_BIND_JETTY, + UBURMA_CMD_UNBIND_JETTY, + UBURMA_CMD_CREATE_JETTY_GRP, + UBURMA_CMD_DESTROY_JETTY_GRP, + UBURMA_CMD_USER_CTL, + UBURMA_CMD_GET_EID_LIST, + UBURMA_CMD_GET_NETADDR_LIST, + UBURMA_CMD_MODIFY_TP, + UBURMA_CMD_QUERY_DEV_ATTR, + UBURMA_CMD_IMPORT_JETTY_ASYNC, + UBURMA_CMD_UNIMPORT_JETTY_ASYNC, + UBURMA_CMD_BIND_JETTY_ASYNC, + UBURMA_CMD_UNBIND_JETTY_ASYNC, + UBURMA_CMD_CREATE_NOTIFIER, + UBURMA_CMD_GET_TP_LIST, + UBURMA_CMD_IMPORT_JETTY_EX, + UBURMA_CMD_IMPORT_JFR_EX, + UBURMA_CMD_BIND_JETTY_EX, + UBURMA_CMD_DELETE_JFS_BATCH, + UBURMA_CMD_DELETE_JFR_BATCH, + UBURMA_CMD_DELETE_JFC_BATCH, + UBURMA_CMD_DELETE_JETTY_BATCH, + UBURMA_CMD_SET_TP_ATTR, + UBURMA_CMD_GET_TP_ATTR, + UBURMA_CMD_EXCHANGE_TP_INFO, + UBURMA_CMD_MAX +}; + +struct uburma_cmd_udrv_priv { + uint64_t in_addr; + uint32_t in_len; + uint64_t out_addr; + uint32_t out_len; +}; + +union uburma_cmd_token_id_flag { + struct { + uint32_t multi_seg : 1; + uint32_t reserved : 31; + } bs; + uint32_t value; +}; + +struct uburma_cmd_create_ctx { + struct { + uint8_t eid[UBCORE_EID_SIZE]; + uint32_t eid_index; + } in; + struct { + int async_fd; + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_alloc_token_id { + struct { + uint32_t token_id; + uint64_t handle; /* handle of the allocated token_id obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; + union uburma_cmd_token_id_flag 
flag; +}; + +struct uburma_cmd_free_token_id { + struct { + uint64_t handle; /* handle of the allocated token_id obj in kernel */ + uint32_t token_id; + } in; +}; + +struct uburma_cmd_register_seg { + struct { + uint64_t va; + uint64_t len; + uint32_t token_id; + uint64_t token_id_handle; + uint32_t token; + uint32_t flag; + } in; + struct { + uint32_t token_id; + uint64_t handle; /* handle of the allocated seg obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_unregister_seg { + struct { + uint64_t handle; /* handle of seg, used to find seg obj in kernel */ + } in; +}; + +struct uburma_cmd_import_seg { + struct { + uint8_t eid[UBCORE_EID_SIZE]; + uint64_t va; + uint64_t len; + uint32_t flag; + uint32_t token; + uint32_t token_id; + uint64_t mva; + } in; + struct { + uint64_t handle; /* handle of the allocated tseg obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_unimport_seg { + struct { + uint64_t handle; /* handle of the seg to be unimported */ + } in; +}; + +struct uburma_cmd_create_jfr { + struct { + uint32_t depth; + uint32_t flag; + uint32_t trans_mode; + uint8_t max_sge; + uint8_t min_rnr_timer; + uint32_t jfc_id; + uint64_t jfc_handle; + uint32_t token; + uint32_t id; + uint64_t urma_jfr; /* urma jfr pointer */ + } in; + struct { + uint32_t id; + uint32_t depth; + uint8_t max_sge; + uint64_t handle; /* handle of the allocated jfr obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_modify_jfr { + struct { + uint64_t handle; /* handle of jfr, used to find jfr obj in kernel */ + uint32_t mask; /* see urma_jfr_attr_mask_t */ + uint32_t rx_threshold; + uint32_t state; + } in; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_query_jfr { + struct { + uint64_t handle; /* handle of the allocated jfr obj in kernel */ + } in; + struct { + uint32_t depth; + uint32_t flag; + uint32_t trans_mode; + uint8_t max_sge; + uint8_t min_rnr_timer; + uint32_t token; + uint32_t id; + + uint32_t rx_threshold; + uint32_t state; + } out; +}; + +struct uburma_cmd_delete_jfr { + struct { + uint64_t handle; /* handle of jfr, used to find jfr obj in kernel */ + } in; + struct { + uint32_t async_events_reported; + } out; +}; + +struct uburma_cmd_delete_jfr_batch { + struct { + uint32_t async_events_reported; + uint32_t bad_jfr_index; + } out; + struct { + uint32_t jfr_num; + uint64_t jfr_ptr; + } in; +}; + +struct uburma_cmd_create_jfs { + struct { + uint32_t depth; + uint32_t flag; + uint32_t trans_mode; + uint8_t priority; + uint8_t max_sge; + uint8_t max_rsge; + uint32_t max_inline_data; + uint8_t retry_cnt; + uint8_t rnr_retry; + uint8_t err_timeout; + uint32_t jfc_id; + uint64_t jfc_handle; + uint64_t urma_jfs; /* urma jfs pointer */ + } in; + struct { + uint32_t id; + uint32_t depth; + uint8_t max_sge; + uint8_t max_rsge; + uint32_t max_inline_data; + uint64_t handle; /* handle of the allocated jfs obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_modify_jfs { + struct { + uint64_t handle; /* handle of jfs, used to find jfs obj in kernel */ + uint32_t mask; /* see urma_jfs_attr_mask_t */ + uint32_t state; + } in; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_query_jfs { + struct { + uint64_t handle; /* handle of the allocated jfs obj in kernel */ + } in; + struct { + uint32_t depth; + uint32_t flag; + uint32_t trans_mode; + uint8_t priority; + uint8_t max_sge; + uint8_t max_rsge; + uint32_t max_inline_data; + uint8_t retry_cnt; + 
uint8_t rnr_retry; + uint8_t err_timeout; + + uint32_t state; + } out; +}; + +struct uburma_cmd_delete_jfs { + struct { + uint64_t handle; /* handle of jfs, used to find jfs obj in kernel */ + } in; + struct { + uint32_t async_events_reported; + } out; +}; + +struct uburma_cmd_delete_jfs_batch { + struct { + uint32_t async_events_reported; + uint32_t bad_jfs_index; + } out; + struct { + uint32_t jfs_num; + uint64_t jfs_ptr; + } in; +}; + +struct uburma_cmd_create_jfc { + struct { + uint32_t depth; /* in terms of CQEBB */ + uint32_t flag; + int jfce_fd; + uint64_t urma_jfc; /* urma jfc pointer */ + uint32_t ceqn; /* [Optional] event queue id */ + } in; + struct { + uint32_t id; + uint32_t depth; + uint64_t handle; /* handle of the allocated jfc obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_modify_jfc { + struct { + uint64_t handle; /* handle of jfc, used to find jfc obj in kernel */ + uint32_t mask; /* see urma_jfc_attr_mask_t */ + uint16_t moderate_count; + uint16_t moderate_period; /* in micro seconds */ + } in; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_delete_jfc { + struct { + uint64_t handle; /* handle of jfc, used to find jfc obj in kernel */ + } in; + struct { + uint32_t comp_events_reported; + uint32_t async_events_reported; + } out; +}; + +struct uburma_cmd_delete_jfc_batch { + struct { + uint32_t comp_events_reported; + uint32_t async_events_reported; + uint32_t bad_jfc_index; + } out; + struct { + uint32_t jfc_num; + uint64_t jfc_ptr; + } in; +}; + +struct uburma_cmd_create_jfce { + struct { + int fd; + } out; +}; + +struct uburma_cmd_import_jfr { + struct { + /* correspond to urma_jfr_id */ + uint8_t eid[UBCORE_EID_SIZE]; + uint32_t id; + uint32_t flag; + /* correspond to urma_token_t */ + uint32_t token; + uint32_t trans_mode; + } in; + struct { + uint32_t tpn; + uint64_t handle; /* handle of the allocated tjfr obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_import_jfr_ex { + struct { + /* correspond to ubcore_jfr_id */ + uint8_t eid[UBCORE_EID_SIZE]; + uint32_t id; + uint32_t flag; + /* correspond to ubcore_token */ + uint32_t token; + uint32_t trans_mode; + /* correspond to struct ubcore_active_tp_cfg */ + uint64_t tp_handle; + uint64_t peer_tp_handle; + uint64_t tag; + uint32_t tx_psn; + uint32_t rx_psn; + } in; + struct { + uint32_t tpn; + uint32_t reserved; + uint64_t handle; /* handle of the allocated tjfr obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; /* this struct should be consistent [urma_cmd_import_jfr_ex_t] */ + +struct uburma_cmd_unimport_jfr { + struct { + uint64_t handle; /* handle of tjfr, used to find tjfr obj in kernel */ + } in; +}; + +struct uburma_cmd_create_jetty { + struct { + uint32_t id; /* user may assign id */ + uint32_t jetty_flag; + + uint32_t jfs_depth; + uint32_t jfs_flag; + uint32_t trans_mode; + uint8_t priority; + uint8_t max_send_sge; + uint8_t max_send_rsge; + uint32_t max_inline_data; + uint8_t rnr_retry; + uint8_t err_timeout; + uint32_t send_jfc_id; + uint64_t send_jfc_handle; /* handle of the related send jfc */ + + uint32_t jfr_depth; + uint32_t jfr_flag; + uint8_t max_recv_sge; + uint8_t min_rnr_timer; + + uint32_t recv_jfc_id; + uint64_t recv_jfc_handle; /* handle of the related recv jfc */ + uint32_t token; + + uint32_t jfr_id; /* shared jfr */ + uint64_t jfr_handle; /* handle of the shared jfr */ + + uint64_t jetty_grp_handle; /* handle of the jetty_grp */ + uint8_t is_jetty_grp; + + uint64_t urma_jetty; /* urma 
jetty pointer */ + } in; + struct { + uint32_t id; /* jetty id allocated by ubcore */ + uint64_t handle; /* handle of the allocated jetty obj in kernel */ + uint32_t jfs_depth; + uint32_t jfr_depth; + uint8_t max_send_sge; + uint8_t max_send_rsge; + uint8_t max_recv_sge; + uint32_t max_inline_data; + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_modify_jetty { + struct { + uint64_t handle; /* handle of jetty, used to find jetty obj in kernel */ + uint32_t mask; /* see urma_jetty_attr_mask_t */ + uint32_t rx_threshold; + uint32_t state; + } in; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_query_jetty { + struct { + uint64_t handle; /* handle of the allocated jetty obj in kernel */ + } in; + struct { + uint32_t id; /* user may assign id */ + uint32_t jetty_flag; + + uint32_t jfs_depth; + uint32_t jfr_depth; + uint32_t jfs_flag; + uint32_t jfr_flag; + uint32_t trans_mode; + uint8_t max_send_sge; + uint8_t max_send_rsge; + uint8_t max_recv_sge; + uint32_t max_inline_data; + uint8_t priority; + uint8_t retry_cnt; + uint8_t rnr_retry; + uint8_t err_timeout; + uint8_t min_rnr_timer; + uint32_t jfr_id; + uint32_t token; + + uint32_t rx_threshold; + uint32_t state; + } out; +}; + +struct uburma_cmd_delete_jetty { + struct { + uint64_t handle; /* handle of jetty, used to find jetty obj in kernel */ + } in; + struct { + uint32_t async_events_reported; + } out; +}; + +struct uburma_cmd_delete_jetty_batch { + struct { + uint32_t async_events_reported; + uint32_t bad_jetty_index; + } out; + struct { + uint32_t jetty_num; + uint64_t jetty_ptr; + } in; +}; + +struct uburma_cmd_import_jetty { + struct { + /* correspond to urma_jetty_id */ + uint8_t eid[UBCORE_EID_SIZE]; + uint32_t id; + uint32_t flag; + /* correspond to urma_token_t */ + uint32_t token; + uint32_t trans_mode; + uint32_t policy; + uint32_t type; + } in; + struct { + uint32_t tpn; + uint64_t handle; /* handle of the allocated tjetty obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_import_jetty_ex { + struct { + /* correspond to ubcore_jetty_id */ + uint8_t eid[UBCORE_EID_SIZE]; + uint32_t id; + uint32_t flag; + /* correspond to ubcore_token */ + uint32_t token; + uint32_t trans_mode; + uint32_t policy; + uint32_t type; + /* correspond to struct ubcore_active_tp_cfg */ + uint64_t tp_handle; + uint64_t peer_tp_handle; + uint64_t tag; + uint32_t tx_psn; + uint32_t rx_psn; + } in; + struct { + uint32_t tpn; + uint32_t reserved; + uint64_t handle; /* handle of the allocated tjetty obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; /* this struct should be consistent [urma_cmd_import_jetty_ex_t] */ + +struct uburma_cmd_unimport_jetty { + struct { + uint64_t handle; /* handle of tjetty, used to find tjetty obj in kernel */ + } in; +}; + +struct uburma_cmd_advise_jetty { + struct { + uint64_t jetty_handle; /* handle of jetty, used to find jetty obj in kernel */ + uint64_t tjetty_handle; /* handle of tjetty, used to find tjetty obj in kernel */ + } in; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_unadvise_jetty { + struct { + uint64_t jetty_handle; /* handle of jetty, used to find jetty obj in kernel */ + uint64_t tjetty_handle; /* handle of tjetty, used to find tjetty obj in kernel */ + } in; +}; + +struct uburma_cmd_bind_jetty { + struct { + uint64_t jetty_handle; /* handle of jetty, used to find jetty obj in kernel */ + uint64_t tjetty_handle; /* handle of tjetty, used to find tjetty obj in kernel */ + } in; + struct { + uint32_t tpn; + 
} out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_bind_jetty_ex { + struct { + uint64_t jetty_handle; /* handle of jetty, used to find jetty obj in kernel */ + uint64_t tjetty_handle; /* handle of tjetty, used to find tjetty obj in kernel */ + /* correspond to ubcore_active_tp_cfg */ + uint64_t tp_handle; + uint64_t peer_tp_handle; + uint64_t tag; + uint32_t tx_psn; + uint32_t rx_psn; + } in; + struct { + uint32_t tpn; + uint32_t reserved; + } out; + struct uburma_cmd_udrv_priv udata; +}; /* this struct should be consistent [urma_cmd_bind_jetty_ex_t] */ + +struct uburma_cmd_unbind_jetty { + struct { + uint64_t jetty_handle; /* handle of jetty, used to find jetty obj in kernel */ + } in; +}; + +struct uburma_cmd_create_jetty_grp { + struct { + char name[UBCORE_JETTY_GRP_MAX_NAME]; + uint32_t token; + uint32_t id; + uint32_t policy; + uint32_t flag; + uint64_t urma_jetty_grp; /* urma jetty group pointer */ + } in; + struct { + uint32_t id; /* jetty group id allocated by ubcore */ + uint64_t handle; /* handle of the allocated jetty group obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_delete_jetty_grp { + struct { + uint64_t handle; /* handle of jetty group, used to find jetty group obj in kernel */ + } in; + struct { + uint32_t async_events_reported; + } out; +}; + +struct uburma_cmd_get_eid_list { + struct { + uint32_t max_eid_cnt; + } in; + struct { + uint32_t eid_cnt; + struct ubcore_eid_info eid_list[UBCORE_MAX_EID_CNT]; + } out; +}; + +struct uburma_cmd_user_ctl { + struct { + uint64_t addr; + uint32_t len; + uint32_t opcode; + } in; /* struct [in] should be consistent with [urma_user_ctl_in_t] */ + struct { + uint64_t addr; + uint32_t len; + uint32_t rsv; + } out; /* struct [out] should be consistent with [urma_user_ctl_out_t] */ + struct { + uint64_t in_addr; + uint32_t in_len; + uint64_t out_addr; + uint32_t out_len; + } udrv; /* struct [udrv] should be consistent with [urma_udrv_t] */ +}; + +union uburma_cmd_tp_cfg_flag { + struct { + uint32_t target : 1; /* 0: initiator, 1: target */ + uint32_t loopback : 1; + uint32_t dca_enable : 1; + /* for the bonding case, the hardware selects the port + * ignoring the port of the tp context and + * selects the port based on the hash value + * along with the information in the bonding group table. 
+ */ + uint32_t bonding : 1; + uint32_t reserved : 28; + } bs; + uint32_t value; +}; + +struct uburma_cmd_user_tp_cfg { + union uburma_cmd_tp_cfg_flag flag; /* flag of initial tp */ + enum ubcore_transport_mode trans_mode; /* transport layer attributes */ + uint8_t retry_num; + uint8_t retry_factor; /* for calculating the time slot to retry */ + uint8_t ack_timeout; + uint8_t dscp; /* priority */ + uint32_t oor_cnt; /* OOR window size: by packet */ +}; + +struct uburma_cmd_net_addr { + sa_family_t sin_family; /* AF_INET/AF_INET6 */ + union { + struct in_addr in4; + struct in6_addr in6; + }; + uint64_t vlan; + uint8_t mac[UBCORE_MAC_BYTES]; + uint32_t prefix_len; +}; + +struct uburma_cmd_tp_attr { + union ubcore_tp_mod_flag flag; /* consistent with urma_tp_mod_flag */ + uint32_t peer_tpn; + enum ubcore_tp_state state; + uint32_t tx_psn; + uint32_t rx_psn; + enum ubcore_mtu mtu; + uint8_t cc_pattern_idx; + uint32_t oos_cnt; /* out of standing packet cnt */ + uint32_t local_net_addr_idx; + struct uburma_cmd_net_addr peer_net_addr; + uint16_t data_udp_start; + uint16_t ack_udp_start; + uint8_t udp_range; + uint8_t hop_limit; + uint32_t flow_label; + uint8_t port_id; + uint8_t mn; /* 0~15, a packet contains only one msg if mn is set as 0 */ + enum ubcore_transport_type peer_trans_type; +}; + +union uburma_cmd_tp_attr_mask { + struct { + uint32_t flag : 1; + uint32_t peer_tpn : 1; + uint32_t state : 1; + uint32_t tx_psn : 1; + uint32_t rx_psn : 1; /* modify both rx psn and tx psn when restore tp */ + uint32_t mtu : 1; + uint32_t cc_pattern_idx : 1; + uint32_t oos_cnt : 1; + uint32_t local_net_addr_idx : 1; + uint32_t peer_net_addr : 1; + uint32_t data_udp_start : 1; + uint32_t ack_udp_start : 1; + uint32_t udp_range : 1; + uint32_t hop_limit : 1; + uint32_t flow_label : 1; + uint32_t port_id : 1; + uint32_t mn : 1; + uint32_t peer_trans_type : 1; /* Only for user tp connection */ + uint32_t reserved : 14; + } bs; + uint32_t value; +}; + +struct uburma_cmd_net_addr_info { + struct ubcore_net_addr netaddr; + uint32_t index; +}; + +struct uburma_cmd_get_net_addr_list { + struct { + uint32_t max_netaddr_cnt; + } in; + struct { + uint32_t netaddr_cnt; + uint64_t addr; /* containing array of struct uburma_cmd_net_addr_info */ + uint64_t len; + } out; +}; + +struct uburma_cmd_modify_tp { + struct { + uint32_t tpn; + struct uburma_cmd_user_tp_cfg tp_cfg; + struct uburma_cmd_tp_attr attr; + union uburma_cmd_tp_attr_mask mask; + } in; +}; /* this struct should be consistent [urma_cmd_modify_tp_t] */ + +struct uburma_cmd_device_cap { + union ubcore_device_feat feature; /* refer to urma_device_feature_t */ + uint32_t max_jfc; + uint32_t max_jfs; + uint32_t max_jfr; + uint32_t max_jetty; + uint32_t max_jetty_grp; + uint32_t max_jetty_in_jetty_grp; + uint32_t max_jfc_depth; + uint32_t max_jfs_depth; + uint32_t max_jfr_depth; + uint32_t max_jfs_inline_len; + uint32_t max_jfs_sge; + uint32_t max_jfs_rsge; + uint32_t max_jfr_sge; + uint64_t max_msg_size; + uint32_t max_read_size; + uint32_t max_write_size; + uint32_t max_cas_size; + uint32_t max_swap_size; + uint32_t max_fetch_and_add_size; + uint32_t max_fetch_and_sub_size; + uint32_t max_fetch_and_and_size; + uint32_t max_fetch_and_or_size; + uint32_t max_fetch_and_xor_size; + union ubcore_atomic_feat atomic_feat; /* refer to urma_atomic_feature_t */ + uint16_t trans_mode; + uint16_t sub_trans_mode_cap; + uint16_t congestion_ctrl_alg; + uint32_t ceq_cnt; + uint32_t max_tp_in_tpg; + uint32_t max_eid_cnt; + uint64_t page_size_cap; + uint32_t max_oor_cnt; + uint32_t
mn; + uint32_t max_netaddr_cnt; +}; /* this struct should be consistent [urma_device_cap_t] */ + +struct uburma_cmd_port_attr { + enum ubcore_mtu max_mtu; /* MTU_256, MTU_512, MTU_1024 */ + enum ubcore_port_state state; /* PORT_DOWN, PORT_INIT, PORT_ACTIVE */ + enum ubcore_link_width active_width; /* link width: X1, X2, X4 */ + enum ubcore_speed active_speed; /* bandwidth */ + enum ubcore_mtu active_mtu; +}; /* this struct should be consistent [struct urma_port_attr] */ + +struct uburma_cmd_device_attr { + struct ubcore_guid guid; /* [Public] */ + struct uburma_cmd_device_cap + dev_cap; /* [Public] capabilities of device. */ + uint8_t port_cnt; /* [Public] port number of device. */ + struct uburma_cmd_port_attr port_attr[UBURMA_CMD_MAX_PORT_CNT]; + uint32_t reserved_jetty_id_min; + uint32_t reserved_jetty_id_max; +}; /* this struct should be consistent [urma_device_attr_t] */ + +struct uburma_cmd_query_device_attr { + struct { + char dev_name[UBCORE_MAX_DEV_NAME]; + } in; + struct { + struct uburma_cmd_device_attr attr; + } out; +}; /* this struct should be consistent [urma_cmd_query_device_attr_t] */ + +struct uburma_cmd_import_jetty_async { + struct { + /* correspond to urma_jetty_id */ + uint8_t eid[UBCORE_EID_SIZE]; + uint32_t id; + uint32_t flag; + /* correspond to urma_token_t */ + uint32_t token; + uint32_t trans_mode; + uint32_t policy; + uint32_t type; + uint64_t urma_tjetty; /* urma tjetty pointer */ + uint64_t user_ctx; + int fd; + int timeout; + } in; + struct { + uint32_t tpn; + uint64_t handle; /* handle of the allocated tjetty obj in kernel */ + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_unimport_jetty_async { + struct { + uint64_t handle; /* handle of tjetty, used to find tjetty obj in kernel */ + } in; +}; + +struct uburma_cmd_bind_jetty_async { + struct { + uint64_t jetty_handle; /* handle of jetty, used to find jetty obj in kernel */ + uint64_t tjetty_handle; /* handle of tjetty, used to find tjetty obj in kernel */ + uint64_t urma_tjetty; /* urma tjetty pointer */ + uint64_t urma_jetty; /* urma jetty pointer */ + int fd; + uint64_t user_ctx; + int timeout; + } in; + struct { + uint32_t tpn; + } out; + struct uburma_cmd_udrv_priv udata; +}; + +struct uburma_cmd_unbind_jetty_async { + struct { + uint64_t jetty_handle; /* handle of jetty, used to find jetty obj in kernel */ + uint64_t tjetty_handle; /* handle of tjetty, used to find tjetty obj in kernel */ + } in; +}; + +struct uburma_cmd_create_notifier { + struct { + int fd; + } out; +}; + +#define UBURMA_CMD_MAX_TP_NUM 128 + +struct uburma_cmd_get_tp_list { + struct { + uint32_t flag; + uint32_t trans_mode; + uint8_t local_eid[UBCORE_EID_SIZE]; + uint8_t peer_eid[UBCORE_EID_SIZE]; + uint32_t tp_cnt; + uint32_t reserved; + } in; + struct { + uint32_t tp_cnt; + uint32_t reserved; + uint64_t tp_handle[UBURMA_CMD_MAX_TP_NUM]; + } out; + struct uburma_cmd_udrv_priv udata; +}; /* this struct should be consistent [urma_cmd_get_tp_list_t] */ + +struct uburma_cmd_set_tp_attr { + struct { + uint64_t tp_handle; + uint8_t tp_attr_cnt; + uint32_t tp_attr_bitmap; + uint8_t tp_attr[UBURMA_CMD_TP_ATTR_BYTES]; + } in; + struct uburma_cmd_udrv_priv udata; +}; /* this struct should be consistent [urma_cmd_set_tp_attr_t] */ + +struct uburma_cmd_get_tp_attr { + struct { + uint64_t tp_handle; + } in; + struct { + uint8_t tp_attr_cnt; + uint32_t tp_attr_bitmap; + uint8_t tp_attr[UBURMA_CMD_TP_ATTR_BYTES]; + } out; + struct uburma_cmd_udrv_priv udata; +}; /* this struct should be consistent [urma_cmd_get_tp_attr_t] */ 
+ +struct uburma_cmd_exchange_tp_info { + struct { + struct ubcore_get_tp_cfg get_tp_cfg; + uint64_t tp_handle; + uint32_t tx_psn; + } in; + struct { + uint64_t peer_tp_handle; + uint32_t rx_psn; + } out; +}; /* this struct should be consistent [urma_cmd_exchange_tp_info_t] */ + +/* only for event ioctl */ +#define MAX_JFCE_EVENT_CNT 16 +#define MAX_NOTIFY_CNT 16 +#define UBURMA_EVENT_CMD_MAGIC 'E' +#define JFCE_CMD_WAIT_EVENT 0 +#define JFAE_CMD_GET_ASYNC_EVENT 0 +#define NOTIFY_CMD_WAIT_NOTIFY 0 +#define UBURMA_CMD_WAIT_JFC \ + _IOWR(UBURMA_EVENT_CMD_MAGIC, JFCE_CMD_WAIT_EVENT, \ + struct uburma_cmd_hdr) +#define UBURMA_CMD_GET_ASYNC_EVENT \ + _IOWR(UBURMA_EVENT_CMD_MAGIC, JFAE_CMD_GET_ASYNC_EVENT, \ + struct uburma_cmd_hdr) +#define UBURMA_CMD_WAIT_NOTIFY \ + _IOWR(UBURMA_EVENT_CMD_MAGIC, NOTIFY_CMD_WAIT_NOTIFY, \ + struct uburma_cmd_hdr) +struct uburma_cmd_jfce_wait { + struct { + uint32_t max_event_cnt; + int time_out; + } in; + struct { + uint32_t event_cnt; + uint64_t event_data[MAX_JFCE_EVENT_CNT]; + } out; +}; + +struct uburma_cmd_async_event { + uint32_t event_type; + uint64_t event_data; + uint32_t pad; +}; + +enum uburma_notify_type { + UBURMA_IMPORT_JETTY_NOTIFY = 0, + UBURMA_BIND_JETTY_NOTIFY +}; + +struct uburma_notify { + enum uburma_notify_type type; + int status; + uint64_t user_ctx; + uint64_t urma_jetty; + uint32_t vtpn; +}; + +struct uburma_notify_event { + struct uburma_notify notify; + int jetty_handle; + int tjetty_handle; +}; + +struct uburma_cmd_wait_notify { + struct { + uint32_t cnt; + int timeout; + } in; + struct { + uint32_t cnt; + struct uburma_notify notify[MAX_NOTIFY_CNT]; + } out; +}; + +/* copy from user_space addr to kernel args */ +static inline int uburma_copy_from_user(void *args, const void *args_addr, + unsigned long args_size) +{ + int ret; + + ret = (int)copy_from_user(args, args_addr, args_size); + if (ret != 0) { + uburma_log_err("copy from user failed, ret:%d.\n", ret); + return -EFAULT; + } + return 0; +} + +/* copy kernel args to user_space addr */ +static inline int uburma_copy_to_user(void *args_addr, const void *args, + unsigned long args_size) +{ + int ret; + + ret = (int)copy_to_user(args_addr, args, args_size); + if (ret != 0) { + uburma_log_err("copy to user failed ret:%d.\n", ret); + return -EFAULT; + } + return 0; +} + +int uburma_unimport_jetty(struct uburma_file *file, bool async, + int tjetty_handle); +int uburma_unbind_jetty(struct uburma_file *file, bool async, int jetty_handle, + int tjetty_handle); + +void uburma_cmd_inc(struct uburma_device *ubu_dev); +void uburma_cmd_dec(struct uburma_device *ubu_dev); +void uburma_cmd_flush(struct uburma_device *ubu_dev); + +#endif /* UBURMA_CMD_H */ diff --git a/drivers/ub/urma/uburma/uburma_cmd_tlv.c b/drivers/ub/urma/uburma/uburma_cmd_tlv.c new file mode 100644 index 000000000000..822f69278fb4 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_cmd_tlv.c @@ -0,0 +1,2087 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: uburma cmd tlv parse implement + * Author: Wang Hang + * Create: 2024-08-27 + * Note: + * History: 2024-08-27: Create file + */ + +#include "uburma_log.h" + +#include "uburma_cmd_tlv.h" + +#define UBURMA_CMD_TLV_MAX_LEN \ + (sizeof(struct uburma_cmd_attr) * UBURMA_CMD_OUT_TYPE_INIT) + +struct uburma_tlv_handler { + void (*fill_spec_in)(void *arg, struct uburma_cmd_spec *s); + size_t spec_in_len; + void (*fill_spec_out)(void *arg, struct uburma_cmd_spec *s); + size_t spec_out_len; +}; + +static inline void fill_spec(struct uburma_cmd_spec *spec, + uint16_t type, uint16_t field_size, + uint16_t el_num, uint16_t el_size, + uintptr_t data) +{ + *spec = (struct uburma_cmd_spec) { + .type = type, + .flag.bs = { .mandatory = 1 }, + .field_size = field_size, + .attr_data.bs = { .el_num = el_num, + .el_size = el_size }, + .data = data, + }; +} + +/** + * Fill spec with a field, which is a value or an array taken as a whole. + * @param v Full path of field, e.g. `arg->out.attr.dev_cap.feature` + */ +#define SPEC(spec, type, v) \ + fill_spec(spec, type, sizeof(v), 1, 0, (uintptr_t)(&(v))) + +/** + * Fill spec with a field, which belongs to an array of structs. + * @param v1 Full path of struct array, e.g. `arg->out.attr.port_attr` + * @param v2 Path relative to struct in array, e.g. `active_speed` + */ +#define SPEC_ARRAY(spec, type, v1, v2) \ + fill_spec(spec, type, sizeof((v1)->v2), ARRAY_SIZE(v1), \ + sizeof((v1)[0]), (uintptr_t)(&((v1)->v2))) + +static void +uburma_create_ctx_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_ctx *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_CTX_IN_EID, arg->in.eid); + SPEC(s++, CREATE_CTX_IN_EID_INDEX, arg->in.eid_index); + SPEC(s++, CREATE_CTX_IN_UDATA, arg->udata); +} + +static void +uburma_create_ctx_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_ctx *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_CTX_OUT_ASYNC_FD, arg->out.async_fd); + SPEC(s++, CREATE_CTX_OUT_UDATA, arg->udata); +} + +static void +uburma_alloc_token_id_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_alloc_token_id *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, ALLOC_TOKEN_ID_IN_FLAG, arg->flag); + SPEC(s++, ALLOC_TOKEN_ID_IN_UDATA, arg->udata); +} + +static void +uburma_alloc_token_id_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_alloc_token_id *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, ALLOC_TOKEN_ID_OUT_TOKEN_ID, arg->out.token_id); + SPEC(s++, ALLOC_TOKEN_ID_OUT_HANDLE, arg->out.handle); + SPEC(s++, ALLOC_TOKEN_ID_OUT_UDATA, arg->udata); +} + +static void +uburma_free_token_id_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_free_token_id *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, FREE_TOKEN_ID_IN_HANDLE, arg->in.handle); + SPEC(s++, FREE_TOKEN_ID_IN_TOKEN_ID, arg->in.token_id); +} + +static void +uburma_register_seg_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_register_seg *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, REGISTER_SEG_IN_VA, arg->in.va); + SPEC(s++, REGISTER_SEG_IN_LEN, arg->in.len); + SPEC(s++, REGISTER_SEG_IN_TOKEN_ID, arg->in.token_id); + SPEC(s++, REGISTER_SEG_IN_TOKEN_ID_HANDLE, + arg->in.token_id_handle); + SPEC(s++, REGISTER_SEG_IN_TOKEN, arg->in.token); + SPEC(s++, 
REGISTER_SEG_IN_FLAG, arg->in.flag); + SPEC(s++, REGISTER_SEG_IN_UDATA, arg->udata); +} + +static void +uburma_register_seg_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_register_seg *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, REGISTER_SEG_OUT_TOKEN_ID, arg->out.token_id); + SPEC(s++, REGISTER_SEG_OUT_HANDLE, arg->out.handle); + SPEC(s++, REGISTER_SEG_OUT_UDATA, arg->udata); +} + +static void +uburma_unregister_seg_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_unregister_seg *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, UNREGISTER_SEG_IN_HANDLE, arg->in.handle); +} + +static void +uburma_import_seg_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_import_seg *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, IMPORT_SEG_IN_EID, arg->in.eid); + SPEC(s++, IMPORT_SEG_IN_VA, arg->in.va); + SPEC(s++, IMPORT_SEG_IN_LEN, arg->in.len); + SPEC(s++, IMPORT_SEG_IN_FLAG, arg->in.flag); + SPEC(s++, IMPORT_SEG_IN_TOKEN, arg->in.token); + SPEC(s++, IMPORT_SEG_IN_TOKEN_ID, arg->in.token_id); + SPEC(s++, IMPORT_SEG_IN_MVA, arg->in.mva); + SPEC(s++, IMPORT_SEG_IN_UDATA, arg->udata); +} + +static void +uburma_import_seg_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_import_seg *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, IMPORT_SEG_OUT_HANDLE, arg->out.handle); + SPEC(s++, IMPORT_SEG_OUT_UDATA, arg->udata); +} + +static void +uburma_unimport_seg_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_unimport_seg *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, UNIMPORT_SEG_IN_HANDLE, arg->in.handle); +} + +static void +uburma_create_jfs_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_jfs *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_JFS_IN_DEPTH, arg->in.depth); + SPEC(s++, CREATE_JFS_IN_FLAG, arg->in.flag); + SPEC(s++, CREATE_JFS_IN_TRANS_MODE, arg->in.trans_mode); + SPEC(s++, CREATE_JFS_IN_PRIORITY, arg->in.priority); + SPEC(s++, CREATE_JFS_IN_MAX_SGE, arg->in.max_sge); + SPEC(s++, CREATE_JFS_IN_MAX_RSGE, arg->in.max_rsge); + SPEC(s++, CREATE_JFS_IN_MAX_INLINE_DATA, + arg->in.max_inline_data); + SPEC(s++, CREATE_JFS_IN_RETRY_CNT, arg->in.retry_cnt); + SPEC(s++, CREATE_JFS_IN_RNR_RETRY, arg->in.rnr_retry); + SPEC(s++, CREATE_JFS_IN_ERR_TIMEOUT, arg->in.err_timeout); + SPEC(s++, CREATE_JFS_IN_JFC_ID, arg->in.jfc_id); + SPEC(s++, CREATE_JFS_IN_JFC_HANDLE, arg->in.jfc_handle); + SPEC(s++, CREATE_JFS_IN_URMA_JFS, arg->in.urma_jfs); + SPEC(s++, CREATE_JFS_IN_UDATA, arg->udata); +} + +static void +uburma_create_jfs_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_jfs *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_JFS_OUT_ID, arg->out.id); + SPEC(s++, CREATE_JFS_OUT_DEPTH, arg->out.depth); + SPEC(s++, CREATE_JFS_OUT_MAX_SGE, arg->out.max_sge); + SPEC(s++, CREATE_JFS_OUT_MAX_RSGE, arg->out.max_rsge); + SPEC(s++, CREATE_JFS_OUT_MAX_INLINE_DATA, + arg->out.max_inline_data); + SPEC(s++, CREATE_JFS_OUT_HANDLE, arg->out.handle); + SPEC(s++, CREATE_JFS_OUT_UDATA, arg->udata); +} + +static void +uburma_modify_jfs_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_modify_jfs *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, MODIFY_JFS_IN_HANDLE, arg->in.handle); + 
SPEC(s++, MODIFY_JFS_IN_MASK, arg->in.mask); + SPEC(s++, MODIFY_JFS_IN_STATE, arg->in.state); + SPEC(s++, MODIFY_JFS_IN_UDATA, arg->udata); +} + +static void +uburma_modify_jfs_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_modify_jfs *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, MODIFY_JFS_OUT_UDATA, arg->udata); +} + +static void +uburma_query_jfs_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_query_jfs *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, QUERY_JFS_IN_HANDLE, arg->in.handle); +} + +static void +uburma_query_jfs_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_query_jfs *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, QUERY_JFS_OUT_DEPTH, arg->out.depth); + SPEC(s++, QUERY_JFS_OUT_FLAG, arg->out.flag); + SPEC(s++, QUERY_JFS_OUT_TRANS_MODE, arg->out.trans_mode); + SPEC(s++, QUERY_JFS_OUT_PRIORITY, arg->out.priority); + SPEC(s++, QUERY_JFS_OUT_MAX_SGE, arg->out.max_sge); + SPEC(s++, QUERY_JFS_OUT_MAX_RSGE, arg->out.max_rsge); + SPEC(s++, QUERY_JFS_OUT_MAX_INLINE_DATA, + arg->out.max_inline_data); + SPEC(s++, QUERY_JFS_OUT_RETRY_CNT, arg->out.retry_cnt); + SPEC(s++, QUERY_JFS_OUT_RNR_RETRY, arg->out.rnr_retry); + SPEC(s++, QUERY_JFS_OUT_ERR_TIMEOUT, arg->out.err_timeout); + SPEC(s++, QUERY_JFS_OUT_STATE, arg->out.state); +} + +static void +uburma_delete_jfs_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jfs *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JFS_IN_HANDLE, arg->in.handle); +} + +static void +uburma_delete_jfs_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jfs *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JFS_OUT_ASYNC_EVENTS_REPORTED, + arg->out.async_events_reported); +} + +static void +uburma_delete_jfs_batch_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jfs_batch *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JFS_BATCH_IN_JFS_COUNT, arg->in.jfs_num); + SPEC(s++, DELETE_JFS_BATCH_IN_JFS_PTR, arg->in.jfs_ptr); +} + +static void +uburma_delete_jfs_batch_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jfs_batch *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JFS_BATCH_OUT_ASYNC_EVENTS_REPORTED, + arg->out.async_events_reported); + SPEC(s++, DELETE_JFS_BATCH_OUT_BAD_JFS_INDEX, + arg->out.bad_jfs_index); +} + +static void +uburma_create_jfr_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_jfr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_JFR_IN_DEPTH, arg->in.depth); + SPEC(s++, CREATE_JFR_IN_FLAG, arg->in.flag); + SPEC(s++, CREATE_JFR_IN_TRANS_MODE, arg->in.trans_mode); + SPEC(s++, CREATE_JFR_IN_MAX_SGE, arg->in.max_sge); + SPEC(s++, CREATE_JFR_IN_MIN_RNR_TIMER, arg->in.min_rnr_timer); + SPEC(s++, CREATE_JFR_IN_JFC_ID, arg->in.jfc_id); + SPEC(s++, CREATE_JFR_IN_JFC_HANDLE, arg->in.jfc_handle); + SPEC(s++, CREATE_JFR_IN_TOKEN, arg->in.token); + SPEC(s++, CREATE_JFR_IN_ID, arg->in.id); + SPEC(s++, CREATE_JFR_IN_URMA_JFR, arg->in.urma_jfr); + SPEC(s++, CREATE_JFR_IN_UDATA, arg->udata); +} + +static void +uburma_create_jfr_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_jfr *arg = arg_addr; + struct uburma_cmd_spec 
*s = spec; + + SPEC(s++, CREATE_JFR_OUT_ID, arg->out.id); + SPEC(s++, CREATE_JFR_OUT_DEPTH, arg->out.depth); + SPEC(s++, CREATE_JFR_OUT_MAX_SGE, arg->out.max_sge); + SPEC(s++, CREATE_JFR_OUT_HANDLE, arg->out.handle); + SPEC(s++, CREATE_JFR_OUT_UDATA, arg->udata); +} + +static void +uburma_modify_jfr_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_modify_jfr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, MODIFY_JFR_IN_HANDLE, arg->in.handle); + SPEC(s++, MODIFY_JFR_IN_MASK, arg->in.mask); + SPEC(s++, MODIFY_JFR_IN_RX_THRESHOLD, arg->in.rx_threshold); + SPEC(s++, MODIFY_JFR_IN_STATE, arg->in.state); + SPEC(s++, MODIFY_JFR_IN_UDATA, arg->udata); +} + +static void +uburma_modify_jfr_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_modify_jfr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, MODIFY_JFR_OUT_UDATA, arg->udata); +} + +static void +uburma_cmd_query_jfr_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_query_jfr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, QUERY_JFR_IN_HANDLE, arg->in.handle); +} + +static void +uburma_cmd_query_jfr_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_query_jfr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, QUERY_JFR_OUT_DEPTH, arg->out.depth); + SPEC(s++, QUERY_JFR_OUT_FLAG, arg->out.flag); + SPEC(s++, QUERY_JFR_OUT_TRANS_MODE, arg->out.trans_mode); + SPEC(s++, QUERY_JFR_OUT_MAX_SGE, arg->out.max_sge); + SPEC(s++, QUERY_JFR_OUT_MIN_RNR_TIMER, + arg->out.min_rnr_timer); + SPEC(s++, QUERY_JFR_OUT_TOKEN, arg->out.token); + SPEC(s++, QUERY_JFR_OUT_ID, arg->out.id); + SPEC(s++, QUERY_JFR_OUT_RX_THRESHOLD, arg->out.rx_threshold); + SPEC(s++, QUERY_JFR_OUT_STATE, arg->out.state); +} + +static void +uburma_delete_jfr_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jfr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JFR_IN_HANDLE, arg->in.handle); +} + +static void +uburma_delete_jfr_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jfr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JFR_OUT_ASYNC_EVENTS_REPORTED, + arg->out.async_events_reported); +} + +static void +uburma_delete_jfr_batch_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jfr_batch *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JFR_BATCH_IN_JFR_COUNT, arg->in.jfr_num); + SPEC(s++, DELETE_JFR_BATCH_IN_JFR_PTR, arg->in.jfr_ptr); +} + +static void +uburma_delete_jfr_batch_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jfr_batch *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JFR_BATCH_OUT_ASYNC_EVENTS_REPORTED, + arg->out.async_events_reported); + SPEC(s++, DELETE_JFR_BATCH_OUT_BAD_JFR_INDEX, + arg->out.bad_jfr_index); +} + +static void +uburma_create_jfc_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_jfc *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_JFC_IN_DEPTH, arg->in.depth); + SPEC(s++, CREATE_JFC_IN_FLAG, arg->in.flag); + SPEC(s++, CREATE_JFC_IN_JFCE_FD, arg->in.jfce_fd); + SPEC(s++, CREATE_JFC_IN_URMA_JFC, arg->in.urma_jfc); + SPEC(s++, CREATE_JFC_IN_CEQN, arg->in.ceqn); + SPEC(s++, CREATE_JFC_IN_UDATA, arg->udata); +} + +static 
void +uburma_create_jfc_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_jfc *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_JFC_OUT_ID, arg->out.id); + SPEC(s++, CREATE_JFC_OUT_DEPTH, arg->out.depth); + SPEC(s++, CREATE_JFC_OUT_HANDLE, arg->out.handle); + SPEC(s++, CREATE_JFC_OUT_UDATA, arg->udata); +} + +static void +uburma_modify_jfc_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_modify_jfc *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, MODIFY_JFC_IN_HANDLE, arg->in.handle); + SPEC(s++, MODIFY_JFC_IN_MASK, arg->in.mask); + SPEC(s++, MODIFY_JFC_IN_MODERATE_COUNT, + arg->in.moderate_count); + SPEC(s++, MODIFY_JFC_IN_MODERATE_PERIOD, + arg->in.moderate_period); + SPEC(s++, MODIFY_JFC_IN_UDATA, arg->udata); +} + +static void +uburma_modify_jfc_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_modify_jfc *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, MODIFY_JFC_OUT_UDATA, arg->udata); +} + +static void +uburma_delete_jfc_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jfc *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JFC_IN_HANDLE, arg->in.handle); +} + +static void +uburma_delete_jfc_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jfc *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JFC_OUT_COMP_EVENTS_REPORTED, + arg->out.comp_events_reported); + SPEC(s++, DELETE_JFC_OUT_ASYNC_EVENTS_REPORTED, + arg->out.async_events_reported); +} + +static void +uburma_delete_jfc_batch_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jfc_batch *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JFC_BATCH_IN_JFC_COUNT, arg->in.jfc_num); + SPEC(s++, DELETE_JFC_BATCH_IN_JFC_PTR, arg->in.jfc_ptr); +} + +static void +uburma_delete_jfc_batch_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jfc_batch *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JFC_BATCH_OUT_COMP_EVENTS_REPORTED, + arg->out.comp_events_reported); + SPEC(s++, DELETE_JFC_BATCH_OUT_ASYNC_EVENTS_REPORTED, + arg->out.async_events_reported); + SPEC(s++, DELETE_JFC_BATCH_OUT_BAD_JFC_INDEX, + arg->out.bad_jfc_index); +} + +static void +uburma_create_jfce_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_jfce *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_JFCE_OUT_FD, arg->out.fd); +} + +static void +uburma_import_jfr_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_import_jfr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, IMPORT_JFR_IN_EID, arg->in.eid); + SPEC(s++, IMPORT_JFR_IN_ID, arg->in.id); + SPEC(s++, IMPORT_JFR_IN_FLAG, arg->in.flag); + SPEC(s++, IMPORT_JFR_IN_TOKEN, arg->in.token); + SPEC(s++, IMPORT_JFR_IN_TRANS_MODE, arg->in.trans_mode); + SPEC(s++, IMPORT_JFR_IN_UDATA, arg->udata); +} + +static void +uburma_import_jfr_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_import_jfr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, IMPORT_JFR_OUT_TPN, arg->out.tpn); + SPEC(s++, IMPORT_JFR_OUT_HANDLE, arg->out.handle); + SPEC(s++, IMPORT_JFR_OUT_UDATA, arg->udata); +} + +static void +uburma_unimport_jfr_fill_spec_in(void 
*arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_unimport_jfr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, UNIMPORT_JFR_IN_HANDLE, arg->in.handle); +} + +static void +uburma_create_jetty_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_JETTY_IN_ID, arg->in.id); + SPEC(s++, CREATE_JETTY_IN_JETTY_FLAG, arg->in.jetty_flag); + SPEC(s++, CREATE_JETTY_IN_JFS_DEPTH, arg->in.jfs_depth); + SPEC(s++, CREATE_JETTY_IN_JFS_FLAG, arg->in.jfs_flag); + SPEC(s++, CREATE_JETTY_IN_TRANS_MODE, arg->in.trans_mode); + SPEC(s++, CREATE_JETTY_IN_PRIORITY, arg->in.priority); + SPEC(s++, CREATE_JETTY_IN_MAX_SEND_SGE, arg->in.max_send_sge); + SPEC(s++, CREATE_JETTY_IN_MAX_SEND_RSGE, + arg->in.max_send_rsge); + SPEC(s++, CREATE_JETTY_IN_MAX_INLINE_DATA, + arg->in.max_inline_data); + SPEC(s++, CREATE_JETTY_IN_RNR_RETRY, arg->in.rnr_retry); + SPEC(s++, CREATE_JETTY_IN_ERR_TIMEOUT, arg->in.err_timeout); + SPEC(s++, CREATE_JETTY_IN_SEND_JFC_ID, arg->in.send_jfc_id); + SPEC(s++, CREATE_JETTY_IN_SEND_JFC_HANDLE, + arg->in.send_jfc_handle); + SPEC(s++, CREATE_JETTY_IN_JFR_DEPTH, arg->in.jfr_depth); + SPEC(s++, CREATE_JETTY_IN_JFR_FLAG, arg->in.jfr_flag); + SPEC(s++, CREATE_JETTY_IN_MAX_RECV_SGE, arg->in.max_recv_sge); + SPEC(s++, CREATE_JETTY_IN_MIN_RNR_TIMER, + arg->in.min_rnr_timer); + SPEC(s++, CREATE_JETTY_IN_RECV_JFC_ID, arg->in.recv_jfc_id); + SPEC(s++, CREATE_JETTY_IN_RECV_JFC_HANDLE, + arg->in.recv_jfc_handle); + SPEC(s++, CREATE_JETTY_IN_TOKEN, arg->in.token); + SPEC(s++, CREATE_JETTY_IN_JFR_ID, arg->in.jfr_id); + SPEC(s++, CREATE_JETTY_IN_JFR_HANDLE, arg->in.jfr_handle); + SPEC(s++, CREATE_JETTY_IN_JETTY_GRP_HANDLE, + arg->in.jetty_grp_handle); + SPEC(s++, CREATE_JETTY_IN_IS_JETTY_GRP, arg->in.is_jetty_grp); + SPEC(s++, CREATE_JETTY_IN_URMA_JETTY, arg->in.urma_jetty); + SPEC(s++, CREATE_JETTY_IN_UDATA, arg->udata); +} + +static void +uburma_create_jetty_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_JETTY_OUT_ID, arg->out.id); + SPEC(s++, CREATE_JETTY_OUT_HANDLE, arg->out.handle); + SPEC(s++, CREATE_JETTY_OUT_JFS_DEPTH, arg->out.jfs_depth); + SPEC(s++, CREATE_JETTY_OUT_JFR_DEPTH, arg->out.jfr_depth); + SPEC(s++, CREATE_JETTY_OUT_MAX_SEND_SGE, + arg->out.max_send_sge); + SPEC(s++, CREATE_JETTY_OUT_MAX_SEND_RSGE, + arg->out.max_send_rsge); + SPEC(s++, CREATE_JETTY_OUT_MAX_RECV_SGE, + arg->out.max_recv_sge); + SPEC(s++, CREATE_JETTY_OUT_MAX_INLINE_DATA, + arg->out.max_inline_data); + SPEC(s++, CREATE_JETTY_OUT_UDATA, arg->udata); +} + +static void +uburma_modify_jetty_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_modify_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, MODIFY_JETTY_IN_HANDLE, arg->in.handle); + SPEC(s++, MODIFY_JETTY_IN_MASK, arg->in.mask); + SPEC(s++, MODIFY_JETTY_IN_RX_THRESHOLD, arg->in.rx_threshold); + SPEC(s++, MODIFY_JETTY_IN_STATE, arg->in.state); + SPEC(s++, MODIFY_JETTY_IN_UDATA, arg->udata); +} + +static void +uburma_modify_jetty_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_modify_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, MODIFY_JETTY_OUT_UDATA, arg->udata); +} + +static void +uburma_query_jetty_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct 
uburma_cmd_query_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, QUERY_JETTY_IN_HANDLE, arg->in.handle); +} + +static void +uburma_query_jetty_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_query_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, QUERY_JETTY_OUT_ID, arg->out.id); + SPEC(s++, QUERY_JETTY_OUT_JETTY_FLAG, arg->out.jetty_flag); + SPEC(s++, QUERY_JETTY_OUT_JFS_DEPTH, arg->out.jfs_depth); + SPEC(s++, QUERY_JETTY_OUT_JFR_DEPTH, arg->out.jfr_depth); + SPEC(s++, QUERY_JETTY_OUT_JFS_FLAG, arg->out.jfs_flag); + SPEC(s++, QUERY_JETTY_OUT_JFR_FLAG, arg->out.jfr_flag); + SPEC(s++, QUERY_JETTY_OUT_TRANS_MODE, arg->out.trans_mode); + SPEC(s++, QUERY_JETTY_OUT_MAX_SEND_SGE, + arg->out.max_send_sge); + SPEC(s++, QUERY_JETTY_OUT_MAX_SEND_RSGE, + arg->out.max_send_rsge); + SPEC(s++, QUERY_JETTY_OUT_MAX_RECV_SGE, + arg->out.max_recv_sge); + SPEC(s++, QUERY_JETTY_OUT_MAX_INLINE_DATA, + arg->out.max_inline_data); + SPEC(s++, QUERY_JETTY_OUT_PRIORITY, arg->out.priority); + SPEC(s++, QUERY_JETTY_OUT_RETRY_CNT, arg->out.retry_cnt); + SPEC(s++, QUERY_JETTY_OUT_RNR_RETRY, arg->out.rnr_retry); + SPEC(s++, QUERY_JETTY_OUT_ERR_TIMEOUT, arg->out.err_timeout); + SPEC(s++, QUERY_JETTY_OUT_MIN_RNR_TIMER, + arg->out.min_rnr_timer); + SPEC(s++, QUERY_JETTY_OUT_JFR_ID, arg->out.jfr_id); + SPEC(s++, QUERY_JETTY_OUT_TOKEN, arg->out.token); + SPEC(s++, QUERY_JETTY_OUT_RX_THRESHOLD, + arg->out.rx_threshold); + SPEC(s++, QUERY_JETTY_OUT_STATE, arg->out.state); +} + +static void +uburma_delete_jetty_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JETTY_IN_HANDLE, arg->in.handle); +} + +static void +uburma_delete_jetty_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JETTY_OUT_ASYNC_EVENTS_REPORTED, + arg->out.async_events_reported); +} + +static void +uburma_delete_jetty_batch_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jetty_batch *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JETTY_BATCH_IN_JETTY_COUNT, + arg->in.jetty_num); + SPEC(s++, DELETE_JETTY_BATCH_IN_JETTY_PTR, arg->in.jetty_ptr); +} + +static void +uburma_delete_jetty_batch_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jetty_batch *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JETTY_BATCH_OUT_ASYNC_EVENTS_REPORTED, + arg->out.async_events_reported); + SPEC(s++, DELETE_JETTY_BATCH_OUT_BAD_JETTY_INDEX, + arg->out.bad_jetty_index); +} + +static void +uburma_import_jetty_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_import_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, IMPORT_JETTY_IN_EID, arg->in.eid); + SPEC(s++, IMPORT_JETTY_IN_ID, arg->in.id); + SPEC(s++, IMPORT_JETTY_IN_FLAG, arg->in.flag); + SPEC(s++, IMPORT_JETTY_IN_TOKEN, arg->in.token); + SPEC(s++, IMPORT_JETTY_IN_TRANS_MODE, arg->in.trans_mode); + SPEC(s++, IMPORT_JETTY_IN_POLICY, arg->in.policy); + SPEC(s++, IMPORT_JETTY_IN_TYPE, arg->in.type); + SPEC(s++, IMPORT_JETTY_IN_UDATA, arg->udata); +} + +static void +uburma_import_jetty_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_import_jetty *arg = arg_addr; + struct uburma_cmd_spec 
*s = spec; + + SPEC(s++, IMPORT_JETTY_OUT_TPN, arg->out.tpn); + SPEC(s++, IMPORT_JETTY_OUT_HANDLE, arg->out.handle); + SPEC(s++, IMPORT_JETTY_OUT_UDATA, arg->udata); +} + +static void +uburma_unimport_jetty_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_unimport_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, UNIMPORT_JETTY_IN_HANDLE, arg->in.handle); +} + +static void +uburma_advise_jetty_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_advise_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, ADVISE_JETTY_IN_JETTY_HANDLE, arg->in.jetty_handle); + SPEC(s++, ADVISE_JETTY_IN_TJETTY_HANDLE, + arg->in.tjetty_handle); + SPEC(s++, ADVISE_JETTY_IN_UDATA, arg->udata); +} + +static void +uburma_unadvise_jetty_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_unadvise_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, UNADVISE_JETTY_IN_JETTY_HANDLE, + arg->in.jetty_handle); + SPEC(s++, UNADVISE_JETTY_IN_TJETTY_HANDLE, + arg->in.tjetty_handle); +} + +static void +uburma_bind_jetty_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_bind_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, BIND_JETTY_IN_JETTY_HANDLE, arg->in.jetty_handle); + SPEC(s++, BIND_JETTY_IN_TJETTY_HANDLE, arg->in.tjetty_handle); + SPEC(s++, BIND_JETTY_IN_UDATA, arg->udata); +} + +static void +uburma_bind_jetty_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_bind_jetty *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, BIND_JETTY_OUT_TPN, arg->out.tpn); + SPEC(s++, BIND_JETTY_OUT_UDATA, arg->udata); +} + +static void +uburma_create_jetty_grp_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_jetty_grp *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_JETTY_GRP_IN_NAME, arg->in.name); + SPEC(s++, CREATE_JETTY_GRP_IN_TOKEN, arg->in.token); + SPEC(s++, CREATE_JETTY_GRP_IN_ID, arg->in.id); + SPEC(s++, CREATE_JETTY_GRP_IN_POLICY, arg->in.policy); + SPEC(s++, CREATE_JETTY_GRP_IN_FLAG, arg->in.flag); + SPEC(s++, CREATE_JETTY_GRP_IN_URMA_JETTY_GRP, + arg->in.urma_jetty_grp); + SPEC(s++, CREATE_JETTY_GRP_IN_UDATA, arg->udata); +} + +static void +uburma_create_jetty_grp_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_jetty_grp *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_JETTY_GRP_OUT_ID, arg->out.id); + SPEC(s++, CREATE_JETTY_GRP_OUT_HANDLE, arg->out.handle); + SPEC(s++, CREATE_JETTY_GRP_OUT_UDATA, arg->udata); +} + +static void +uburma_delete_jetty_grp_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jetty_grp *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JETTY_GRP_IN_HANDLE, arg->in.handle); +} + +static void +uburma_delete_jetty_grp_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_delete_jetty_grp *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, DELETE_JETTY_GRP_OUT_ASYNC_EVENTS_REPORTED, + arg->out.async_events_reported); +} + +static void uburma_user_ctl_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_user_ctl *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, USER_CTL_IN_IN_ADDR, arg->in.addr); + SPEC(s++, USER_CTL_IN_IN_LEN, arg->in.len); + 
SPEC(s++, USER_CTL_IN_OPCODE, arg->in.opcode); + SPEC(s++, USER_CTL_IN_OUT_ADDR, arg->out.addr); + SPEC(s++, USER_CTL_IN_OUT_LEN, arg->out.len); + SPEC(s++, USER_CTL_IN_UDATA, arg->udrv); +} + +static void +uburma_get_eid_list_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_get_eid_list *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, GET_EID_LIST_IN_MAX_EID_CNT, arg->in.max_eid_cnt); +} + +static void +uburma_get_eid_list_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_get_eid_list *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, GET_EID_LIST_OUT_EID_CNT, arg->out.eid_cnt); + SPEC(s++, GET_EID_LIST_OUT_EID_LIST, arg->out.eid_list); +} + +static void +uburma_get_net_addr_list_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_get_net_addr_list *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, GET_NET_ADDR_LIST_IN_MAX_NETADDR_CNT, + arg->in.max_netaddr_cnt); +} + +static void +uburma_get_net_addr_list_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_get_net_addr_list *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + uint64_t netaddr_size = + sizeof(struct uburma_cmd_net_addr_info); + + SPEC(s++, GET_NET_ADDR_LIST_OUT_NETADDR_CNT, + arg->out.netaddr_cnt); + fill_spec(s++, GET_NET_ADDR_LIST_OUT_NETADDR_LIST, + netaddr_size, arg->out.len / netaddr_size, + netaddr_size, arg->out.addr); +} + +static void +uburma_modify_tp_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_modify_tp *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, MODIFY_TP_IN_TPN, arg->in.tpn); + SPEC(s++, MODIFY_TP_IN_TP_CFG_FLAG, arg->in.tp_cfg.flag); + SPEC(s++, MODIFY_TP_IN_TP_CFG_TRANS_MODE, + arg->in.tp_cfg.trans_mode); + SPEC(s++, MODIFY_TP_IN_TP_CFG_RETRY_NUM, + arg->in.tp_cfg.retry_num); + SPEC(s++, MODIFY_TP_IN_TP_CFG_RETRY_FACTOR, + arg->in.tp_cfg.retry_factor); + SPEC(s++, MODIFY_TP_IN_TP_CFG_ACK_TIMEOUT, + arg->in.tp_cfg.ack_timeout); + SPEC(s++, MODIFY_TP_IN_TP_CFG_DSCP, arg->in.tp_cfg.dscp); + SPEC(s++, MODIFY_TP_IN_TP_CFG_OOR_CNT, + arg->in.tp_cfg.oor_cnt); + SPEC(s++, MODIFY_TP_IN_ATTR_FLAG, arg->in.attr.flag); + SPEC(s++, MODIFY_TP_IN_ATTR_PEER_TPN, arg->in.attr.peer_tpn); + SPEC(s++, MODIFY_TP_IN_ATTR_STATE, arg->in.attr.state); + SPEC(s++, MODIFY_TP_IN_ATTR_TX_PSN, arg->in.attr.tx_psn); + SPEC(s++, MODIFY_TP_IN_ATTR_RX_PSN, arg->in.attr.rx_psn); + SPEC(s++, MODIFY_TP_IN_ATTR_MTU, arg->in.attr.mtu); + SPEC(s++, MODIFY_TP_IN_ATTR_CC_PATTERN_IDX, + arg->in.attr.cc_pattern_idx); + SPEC(s++, MODIFY_TP_IN_ATTR_OOS_CNT, arg->in.attr.oos_cnt); + SPEC(s++, MODIFY_TP_IN_ATTR_LOCAL_NET_ADDR_IDX, + arg->in.attr.local_net_addr_idx); + SPEC(s++, MODIFY_TP_IN_ATTR_PEER_NET_ADDR, + arg->in.attr.peer_net_addr); + SPEC(s++, MODIFY_TP_IN_ATTR_DATA_UDP_START, + arg->in.attr.data_udp_start); + SPEC(s++, MODIFY_TP_IN_ATTR_ACK_UDP_START, + arg->in.attr.ack_udp_start); + SPEC(s++, MODIFY_TP_IN_ATTR_UDP_RANGE, + arg->in.attr.udp_range); + SPEC(s++, MODIFY_TP_IN_ATTR_HOP_LIMIT, + arg->in.attr.hop_limit); + SPEC(s++, MODIFY_TP_IN_ATTR_FLOW_LABEL, + arg->in.attr.flow_label); + SPEC(s++, MODIFY_TP_IN_ATTR_PORT_ID, arg->in.attr.port_id); + SPEC(s++, MODIFY_TP_IN_ATTR_MN, arg->in.attr.mn); + SPEC(s++, MODIFY_TP_IN_ATTR_PEER_TRANS_TYPE, + arg->in.attr.peer_trans_type); + SPEC(s++, MODIFY_TP_IN_MASK, arg->in.mask); +} + +static void +uburma_query_device_fill_spec_in(void *arg_addr, + struct 
uburma_cmd_spec *spec) +{ + struct uburma_cmd_query_device_attr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, QUERY_DEVICE_IN_DEV_NAME, arg->in.dev_name); +} + +static void +uburma_query_device_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_query_device_attr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, QUERY_DEVICE_OUT_GUID, arg->out.attr.guid); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_FEATURE, + arg->out.attr.dev_cap.feature); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JFC, + arg->out.attr.dev_cap.max_jfc); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JFS, + arg->out.attr.dev_cap.max_jfs); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JFR, + arg->out.attr.dev_cap.max_jfr); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JETTY, + arg->out.attr.dev_cap.max_jetty); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JETTY_GRP, + arg->out.attr.dev_cap.max_jetty_grp); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JETTY_IN_JETTY_GRP, + arg->out.attr.dev_cap.max_jetty_in_jetty_grp); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JFC_DEPTH, + arg->out.attr.dev_cap.max_jfc_depth); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JFS_DEPTH, + arg->out.attr.dev_cap.max_jfs_depth); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JFR_DEPTH, + arg->out.attr.dev_cap.max_jfr_depth); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JFS_INLINE_LEN, + arg->out.attr.dev_cap.max_jfs_inline_len); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JFS_SGE, + arg->out.attr.dev_cap.max_jfs_sge); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JFS_RSGE, + arg->out.attr.dev_cap.max_jfs_rsge); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_JFR_SGE, + arg->out.attr.dev_cap.max_jfr_sge); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_MSG_SIZE, + arg->out.attr.dev_cap.max_msg_size); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_READ_SIZE, + arg->out.attr.dev_cap.max_read_size); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_WRITE_SIZE, + arg->out.attr.dev_cap.max_write_size); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_CAS_SIZE, + arg->out.attr.dev_cap.max_cas_size); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_SWAP_SIZE, + arg->out.attr.dev_cap.max_swap_size); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_FETCH_AND_ADD_SIZE, + arg->out.attr.dev_cap.max_fetch_and_add_size); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_FETCH_AND_SUB_SIZE, + arg->out.attr.dev_cap.max_fetch_and_sub_size); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_FETCH_AND_AND_SIZE, + arg->out.attr.dev_cap.max_fetch_and_and_size); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_FETCH_AND_OR_SIZE, + arg->out.attr.dev_cap.max_fetch_and_or_size); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_FETCH_AND_XOR_SIZE, + arg->out.attr.dev_cap.max_fetch_and_xor_size); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_ATOMIC_FEAT, + arg->out.attr.dev_cap.atomic_feat); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_TRANS_MODE, + arg->out.attr.dev_cap.trans_mode); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_SUB_TRANS_MODE_CAP, + arg->out.attr.dev_cap.sub_trans_mode_cap); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_CONGESTION_CTRL_ALG, + arg->out.attr.dev_cap.congestion_ctrl_alg); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_CEQ_CNT, + arg->out.attr.dev_cap.ceq_cnt); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_TP_IN_TPG, + arg->out.attr.dev_cap.max_tp_in_tpg); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_EID_CNT, + arg->out.attr.dev_cap.max_eid_cnt); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_PAGE_SIZE_CAP, + arg->out.attr.dev_cap.page_size_cap); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_OOR_CNT, + arg->out.attr.dev_cap.max_oor_cnt); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MN, + 
arg->out.attr.dev_cap.mn); + SPEC(s++, QUERY_DEVICE_OUT_DEV_CAP_MAX_NETADDR_CN, + arg->out.attr.dev_cap.max_netaddr_cnt); + SPEC(s++, QUERY_DEVICE_OUT_PORT_CNT, arg->out.attr.port_cnt); + SPEC(s++, QUERY_DEVICE_OUT_RESERVED_JETTY_ID_MIN, + arg->out.attr.reserved_jetty_id_min); + SPEC(s++, QUERY_DEVICE_OUT_RESERVED_JETTY_ID_MAX, + arg->out.attr.reserved_jetty_id_max); + + SPEC_ARRAY(s++, QUERY_DEVICE_OUT_PORT_ATTR_MAX_MTU, + arg->out.attr.port_attr, max_mtu); + SPEC_ARRAY(s++, QUERY_DEVICE_OUT_PORT_ATTR_STATE, + arg->out.attr.port_attr, state); + SPEC_ARRAY(s++, QUERY_DEVICE_OUT_PORT_ATTR_ACTIVE_WIDTH, + arg->out.attr.port_attr, active_width); + SPEC_ARRAY(s++, QUERY_DEVICE_OUT_PORT_ATTR_ACTIVE_SPEED, + arg->out.attr.port_attr, active_speed); + SPEC_ARRAY(s++, QUERY_DEVICE_OUT_PORT_ATTR_ACTIVE_MTU, + arg->out.attr.port_attr, active_mtu); +} + +static void +uburma_import_jetty_async_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_import_jetty_async *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, IMPORT_JETTY_ASYNC_IN_EID, arg->in.eid); + SPEC(s++, IMPORT_JETTY_ASYNC_IN_ID, arg->in.id); + SPEC(s++, IMPORT_JETTY_ASYNC_IN_FLAG, arg->in.flag); + SPEC(s++, IMPORT_JETTY_ASYNC_IN_TOKEN, arg->in.token); + SPEC(s++, IMPORT_JETTY_ASYNC_IN_TRANS_MODE, + arg->in.trans_mode); + SPEC(s++, IMPORT_JETTY_ASYNC_IN_POLICY, arg->in.policy); + SPEC(s++, IMPORT_JETTY_ASYNC_IN_TYPE, arg->in.type); + SPEC(s++, IMPORT_JETTY_ASYNC_IN_URMA_TJETTY, + arg->in.urma_tjetty); + SPEC(s++, IMPORT_JETTY_ASYNC_IN_USER_CTX, arg->in.user_ctx); + SPEC(s++, IMPORT_JETTY_ASYNC_IN_FD, arg->in.fd); + SPEC(s++, IMPORT_JETTY_ASYNC_IN_TIMEOUT, arg->in.timeout); + SPEC(s++, IMPORT_JETTY_ASYNC_IN_UDATA, arg->udata); +} + +static void +uburma_import_jetty_async_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_import_jetty_async *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, IMPORT_JETTY_ASYNC_OUT_TPN, arg->out.tpn); + SPEC(s++, IMPORT_JETTY_ASYNC_OUT_HANDLE, arg->out.handle); + SPEC(s++, IMPORT_JETTY_ASYNC_OUT_UDATA, arg->udata); +} + +static void +uburma_unimport_jetty_async_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_unimport_jetty_async *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, UNIMPORT_JETTY_ASYNC_IN_HANDLE, arg->in.handle); +} + +static void +uburma_bind_jetty_async_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_bind_jetty_async *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, BIND_JETTY_ASYNC_IN_JETTY_HANDLE, + arg->in.jetty_handle); + SPEC(s++, BIND_JETTY_ASYNC_IN_TJETTY_HANDLE, + arg->in.tjetty_handle); + SPEC(s++, BIND_JETTY_ASYNC_IN_URMA_TJETTY, + arg->in.urma_tjetty); + SPEC(s++, BIND_JETTY_ASYNC_IN_URMA_JETTY, arg->in.urma_jetty); + SPEC(s++, BIND_JETTY_ASYNC_IN_FD, arg->in.fd); + SPEC(s++, BIND_JETTY_ASYNC_IN_USER_CTX, arg->in.user_ctx); + SPEC(s++, BIND_JETTY_ASYNC_IN_TIMEOUT, arg->in.timeout); + SPEC(s++, BIND_JETTY_ASYNC_IN_UDATA, arg->udata); +} + +static void +uburma_bind_jetty_async_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_bind_jetty_async *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, BIND_JETTY_ASYNC_OUT_TPN, arg->out.tpn); + SPEC(s++, BIND_JETTY_ASYNC_OUT_UDATA, arg->udata); +} + +static void +uburma_unbind_jetty_async_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_unbind_jetty_async *arg = 
arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, UNBIND_JETTY_ASYNC_IN_JETTY_HANDLE, + arg->in.jetty_handle); + SPEC(s++, UNBIND_JETTY_ASYNC_IN_TJETTY_HANDLE, + arg->in.tjetty_handle); +} + +static void +uburma_create_notifier_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_create_notifier *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, CREATE_NOTIFIER_OUT_FD, arg->out.fd); +} + +static void +uburma_get_tp_list_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_get_tp_list *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, GET_TP_LIST_IN_FLAG, arg->in.flag); + SPEC(s++, GET_TP_LIST_IN_TRANS_MODE, arg->in.trans_mode); + SPEC(s++, GET_TP_LIST_IN_LOCAL_EID, arg->in.local_eid); + SPEC(s++, GET_TP_LIST_IN_PEER_EID, arg->in.peer_eid); + SPEC(s++, GET_TP_LIST_IN_TP_CNT, arg->in.tp_cnt); + SPEC(s++, GET_TP_LIST_IN_UDATA, arg->udata); +} + +static void +uburma_get_tp_list_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_get_tp_list *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, GET_TP_LIST_OUT_TP_CNT, arg->out.tp_cnt); + SPEC(s++, GET_TP_LIST_OUT_TP_HANDLE, arg->out.tp_handle); + SPEC(s++, GET_TP_LIST_OUT_UDATA, arg->udata); +} + +static void +uburma_import_jetty_ex_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_import_jetty_ex *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, IMPORT_JETTY_EX_IN_EID, arg->in.eid); + SPEC(s++, IMPORT_JETTY_EX_IN_ID, arg->in.id); + SPEC(s++, IMPORT_JETTY_EX_IN_FLAG, arg->in.flag); + SPEC(s++, IMPORT_JETTY_EX_IN_TOKEN, arg->in.token); + SPEC(s++, IMPORT_JETTY_EX_IN_TRANS_MODE, arg->in.trans_mode); + SPEC(s++, IMPORT_JETTY_EX_IN_POLICY, arg->in.policy); + SPEC(s++, IMPORT_JETTY_EX_IN_TYPE, arg->in.type); + SPEC(s++, IMPORT_JETTY_EX_IN_TP_HANDLE, arg->in.tp_handle); + SPEC(s++, IMPORT_JETTY_EX_IN_PEER_TP_HANDLE, + arg->in.peer_tp_handle); + SPEC(s++, IMPORT_JETTY_EX_IN_TAG, arg->in.tag); + SPEC(s++, IMPORT_JETTY_EX_IN_TX_PSN, arg->in.tx_psn); + SPEC(s++, IMPORT_JETTY_EX_IN_RX_PSN, arg->in.rx_psn); + SPEC(s++, IMPORT_JETTY_EX_IN_UDATA, arg->udata); +} + +static void +uburma_import_jetty_ex_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_import_jetty_ex *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, IMPORT_JETTY_EX_OUT_TPN, arg->out.tpn); + SPEC(s++, IMPORT_JETTY_EX_OUT_HANDLE, arg->out.handle); + SPEC(s++, IMPORT_JETTY_EX_OUT_UDATA, arg->udata); +} + +static void +uburma_import_jfr_ex_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_import_jfr_ex *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, IMPORT_JFR_EX_IN_EID, arg->in.eid); + SPEC(s++, IMPORT_JFR_EX_IN_ID, arg->in.id); + SPEC(s++, IMPORT_JFR_EX_IN_FLAG, arg->in.flag); + SPEC(s++, IMPORT_JFR_EX_IN_TOKEN, arg->in.token); + SPEC(s++, IMPORT_JFR_EX_IN_TRANS_MODE, arg->in.trans_mode); + SPEC(s++, IMPORT_JFR_EX_IN_TP_HANDLE, arg->in.tp_handle); + SPEC(s++, IMPORT_JFR_EX_IN_PEER_TP_HANDLE, + arg->in.peer_tp_handle); + SPEC(s++, IMPORT_JFR_EX_IN_TAG, arg->in.tag); + SPEC(s++, IMPORT_JFR_EX_IN_TX_PSN, arg->in.tx_psn); + SPEC(s++, IMPORT_JFR_EX_IN_RX_PSN, arg->in.rx_psn); + SPEC(s++, IMPORT_JFR_EX_IN_UDATA, arg->udata); +} + +static void +uburma_import_jfr_ex_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_import_jfr_ex *arg = arg_addr; + struct 
uburma_cmd_spec *s = spec; + + SPEC(s++, IMPORT_JFR_EX_OUT_TPN, arg->out.tpn); + SPEC(s++, IMPORT_JFR_EX_OUT_HANDLE, arg->out.handle); + SPEC(s++, IMPORT_JFR_EX_OUT_UDATA, arg->udata); +} + +static void +uburma_bind_jetty_ex_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_bind_jetty_ex *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, BIND_JETTY_EX_IN_JETTY_HANDLE, + arg->in.jetty_handle); + SPEC(s++, BIND_JETTY_EX_IN_TJETTY_HANDLE, + arg->in.tjetty_handle); + SPEC(s++, BIND_JETTY_EX_IN_TP_HANDLE, arg->in.tp_handle); + SPEC(s++, BIND_JETTY_EX_IN_PEER_TP_HANDLE, + arg->in.peer_tp_handle); + SPEC(s++, BIND_JETTY_EX_IN_TAG, arg->in.tag); + SPEC(s++, BIND_JETTY_EX_IN_TX_PSN, arg->in.tx_psn); + SPEC(s++, BIND_JETTY_EX_IN_RX_PSN, arg->in.rx_psn); + SPEC(s++, BIND_JETTY_EX_IN_UDATA, arg->udata); +} + +static void +uburma_bind_jetty_ex_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_bind_jetty_ex *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, BIND_JETTY_EX_OUT_TPN, arg->out.tpn); + SPEC(s++, BIND_JETTY_EX_OUT_UDATA, arg->udata); +} + +static void +uburma_set_tp_attr_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_set_tp_attr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, SET_TP_ATTR_IN_TP_HANLDE, arg->in.tp_handle); + SPEC(s++, SET_TP_ATTR_IN_TP_ATTR_CNT, arg->in.tp_attr_cnt); + SPEC(s++, SET_TP_ATTR_IN_TP_ATTR_BITMAP, + arg->in.tp_attr_bitmap); + SPEC(s++, SET_TP_ATTR_IN_TP_ATTR, arg->in.tp_attr); + SPEC(s++, SET_TP_ATTR_IN_UDATA, arg->udata); +} + +static void +uburma_set_tp_attr_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_set_tp_attr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, SET_TP_ATTR_OUT_UDATA, arg->udata); +} + +static void +uburma_get_tp_attr_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_get_tp_attr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, GET_TP_ATTR_IN_TP_HANDLE, arg->in.tp_handle); + SPEC(s++, GET_TP_ATTR_IN_UDATA, arg->udata); +} + +static void +uburma_get_tp_attr_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_get_tp_attr *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, GET_TP_ATTR_OUT_TP_ATTR_CNT, arg->out.tp_attr_cnt); + SPEC(s++, GET_TP_ATTR_OUT_TP_ATTR_BITMAP, + arg->out.tp_attr_bitmap); + SPEC(s++, GET_TP_ATTR_OUT_TP_ATTR, arg->out.tp_attr); + SPEC(s++, GET_TP_ATTR_OUT_UDATA, arg->udata); +} + +static void +uburma_exchange_tp_info_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_exchange_tp_info *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, EXCHANGE_TP_INFO_IN_FLAG, arg->in.get_tp_cfg.flag); + SPEC(s++, EXCHANGE_TP_INFO_IN_TRANS_MODE, + arg->in.get_tp_cfg.trans_mode); + SPEC(s++, EXCHANGE_TP_INFO_IN_LOCAL_EID, + arg->in.get_tp_cfg.local_eid); + SPEC(s++, EXCHANGE_TP_INFO_IN_PEER_EID, + arg->in.get_tp_cfg.peer_eid); + SPEC(s++, EXCHANGE_TP_INFO_IN_TP_HANDLE, arg->in.tp_handle); + SPEC(s++, EXCHANGE_TP_INFO_IN_TX_PSN, arg->in.tx_psn); +} + +static void +uburma_exchange_tp_info_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_exchange_tp_info *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, EXCHANGE_TP_INFO_OUT_PEER_TP_HANDLE, + arg->out.peer_tp_handle); + SPEC(s++, EXCHANGE_TP_INFO_OUT_RX_PSN, arg->out.rx_psn); +} + +static 
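The fill_spec_in/fill_spec_out helpers above all do the same bookkeeping: they advance a struct uburma_cmd_spec cursor and record, for every attribute type of a command, where the matching field lives inside the kernel-side arg structure, how large it is, and (for arrays such as the per-port attributes filled with SPEC_ARRAY) how many elements it has. The SPEC()/SPEC_ARRAY() macros themselves are defined earlier in uburma_cmd_tlv.c and are not shown in this hunk; the sketch below is only an illustration of the fields they have to populate, derived from the struct uburma_cmd_spec layout declared in uburma_cmd_tlv.h. The helper name and the mandatory default are assumptions, not the real macro definitions.

/*
 * Illustrative equivalent of the SPEC()/SPEC_ARRAY() bookkeeping; not the
 * actual macros from earlier in this file.
 */
static inline void uburma_spec_fill_sketch(struct uburma_cmd_spec *s,
					   uint8_t type, void *field,
					   uint16_t field_size,
					   uint32_t el_num, uint32_t el_size)
{
	s->type = type;				/* enum uburma_cmd_xxx_type value */
	s->flag.bs.mandatory = 1;		/* assumed default; some fields may be optional */
	s->field_size = field_size;		/* bytes copied per element */
	s->attr_data.bs.el_num = el_num;	/* 1 for scalars, array length otherwise */
	s->attr_data.bs.el_size = el_size;	/* element stride in the kernel arg struct */
	s->data = (uint64_t)(uintptr_t)field;	/* kernel address of the first element */
}

Keeping data as a plain 64-bit value lets struct uburma_cmd_spec mirror the userspace-facing struct uburma_cmd_attr, where the same slot carries a user pointer instead of a kernel address.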
struct uburma_tlv_handler g_tlv_handler[] = { + [0] = {0}, + [UBURMA_CMD_CREATE_CTX] = { + uburma_create_ctx_fill_spec_in, CREATE_CTX_IN_NUM, + uburma_create_ctx_fill_spec_out, CREATE_CTX_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_ALLOC_TOKEN_ID] = { + uburma_alloc_token_id_fill_spec_in, ALLOC_TOKEN_ID_IN_NUM, + uburma_alloc_token_id_fill_spec_out, ALLOC_TOKEN_ID_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_FREE_TOKEN_ID] = { + uburma_free_token_id_fill_spec_in, FREE_TOKEN_ID_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_REGISTER_SEG] = { + uburma_register_seg_fill_spec_in, REGISTER_SEG_IN_NUM, + uburma_register_seg_fill_spec_out, REGISTER_SEG_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_UNREGISTER_SEG] = { + uburma_unregister_seg_fill_spec_in, UNREGISTER_SEG_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_IMPORT_SEG] = { + uburma_import_seg_fill_spec_in, IMPORT_SEG_IN_NUM, + uburma_import_seg_fill_spec_out, IMPORT_SEG_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_UNIMPORT_SEG] = { + uburma_unimport_seg_fill_spec_in, UNIMPORT_SEG_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_CREATE_JFR] = { + uburma_create_jfr_fill_spec_in, CREATE_JFR_IN_NUM, + uburma_create_jfr_fill_spec_out, CREATE_JFR_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_MODIFY_JFR] = { + uburma_modify_jfr_fill_spec_in, MODIFY_JFR_IN_NUM, + uburma_modify_jfr_fill_spec_out, MODIFY_JFR_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_QUERY_JFR] = { + uburma_cmd_query_jfr_fill_spec_in, QUERY_JFR_IN_NUM, + uburma_cmd_query_jfr_fill_spec_out, QUERY_JFR_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_DELETE_JFR] = { + uburma_delete_jfr_fill_spec_in, DELETE_JFR_IN_NUM, + uburma_delete_jfr_fill_spec_out, DELETE_JFR_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_CREATE_JFS] = { + uburma_create_jfs_fill_spec_in, CREATE_JFS_IN_NUM, + uburma_create_jfs_fill_spec_out, CREATE_JFS_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_MODIFY_JFS] = { + uburma_modify_jfs_fill_spec_in, MODIFY_JFS_IN_NUM, + uburma_modify_jfs_fill_spec_out, MODIFY_JFS_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_QUERY_JFS] = { + uburma_query_jfs_fill_spec_in, QUERY_JFS_IN_NUM, + uburma_query_jfs_fill_spec_out, QUERY_JFS_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_DELETE_JFS] = { + uburma_delete_jfs_fill_spec_in, DELETE_JFS_IN_NUM, + uburma_delete_jfs_fill_spec_out, DELETE_JFS_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_CREATE_JFC] = { + uburma_create_jfc_fill_spec_in, CREATE_JFC_IN_NUM, + uburma_create_jfc_fill_spec_out, CREATE_JFC_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_MODIFY_JFC] = { + uburma_modify_jfc_fill_spec_in, MODIFY_JFC_IN_NUM, + uburma_modify_jfc_fill_spec_out, MODIFY_JFC_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_DELETE_JFC] = { + uburma_delete_jfc_fill_spec_in, DELETE_JFC_IN_NUM, + uburma_delete_jfc_fill_spec_out, DELETE_JFC_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_CREATE_JFCE] = { + NULL, 0, + uburma_create_jfce_fill_spec_out, CREATE_JFCE_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_IMPORT_JFR] = { + uburma_import_jfr_fill_spec_in, IMPORT_JFR_IN_NUM, + uburma_import_jfr_fill_spec_out, IMPORT_JFR_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_UNIMPORT_JFR] = { + uburma_unimport_jfr_fill_spec_in, UNIMPORT_JFR_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_CREATE_JETTY] = { + uburma_create_jetty_fill_spec_in, CREATE_JETTY_IN_NUM, + uburma_create_jetty_fill_spec_out, CREATE_JETTY_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + 
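Each g_tlv_handler entry pairs a fill callback with the number of specs it produces. In-direction counts are the *_IN_NUM sentinels used directly, since in types are numbered from zero; out-direction counts subtract UBURMA_CMD_OUT_TYPE_INIT (0x80), because out types start at that base so in and out attributes can share one 8-bit type namespace. A quick check against the create-context enum defined later in uburma_cmd_tlv.h; the static_assert is a suggested guard, not part of this patch:

/*
 * Worked example of the spec_out_len arithmetic for UBURMA_CMD_CREATE_CTX:
 *   CREATE_CTX_OUT_ASYNC_FD = 0x80 (UBURMA_CMD_OUT_TYPE_INIT)
 *   CREATE_CTX_OUT_UDATA    = 0x81
 *   CREATE_CTX_OUT_NUM      = 0x82
 * => 0x82 - 0x80 = 2 out specs: the async fd and the udata.
 *
 * Optional compile-time guard under the same convention (needs
 * <linux/build_bug.h>):
 */
static_assert(CREATE_CTX_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT == 2,
	      "create_ctx carries exactly two out attrs: async fd and udata");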
[UBURMA_CMD_MODIFY_JETTY] = { + uburma_modify_jetty_fill_spec_in, MODIFY_JETTY_IN_NUM, + uburma_modify_jetty_fill_spec_out, MODIFY_JETTY_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_QUERY_JETTY] = { + uburma_query_jetty_fill_spec_in, QUERY_JETTY_IN_NUM, + uburma_query_jetty_fill_spec_out, QUERY_JETTY_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_DELETE_JETTY] = { + uburma_delete_jetty_fill_spec_in, DELETE_JETTY_IN_NUM, + uburma_delete_jetty_fill_spec_out, DELETE_JETTY_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_IMPORT_JETTY] = { + uburma_import_jetty_fill_spec_in, IMPORT_JETTY_IN_NUM, + uburma_import_jetty_fill_spec_out, IMPORT_JETTY_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_UNIMPORT_JETTY] = { + uburma_unimport_jetty_fill_spec_in, UNIMPORT_JETTY_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_ADVISE_JFR] = { + uburma_advise_jetty_fill_spec_in, ADVISE_JETTY_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_UNADVISE_JFR] = { + uburma_unadvise_jetty_fill_spec_in, UNADVISE_JETTY_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_ADVISE_JETTY] = { + uburma_advise_jetty_fill_spec_in, ADVISE_JETTY_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_UNADVISE_JETTY] = { + uburma_unadvise_jetty_fill_spec_in, UNADVISE_JETTY_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_BIND_JETTY] = { + uburma_bind_jetty_fill_spec_in, BIND_JETTY_IN_NUM, + uburma_bind_jetty_fill_spec_out, BIND_JETTY_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_UNBIND_JETTY] = { + uburma_unadvise_jetty_fill_spec_in, UNADVISE_JETTY_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_CREATE_JETTY_GRP] = { + uburma_create_jetty_grp_fill_spec_in, CREATE_JETTY_GRP_IN_NUM, + uburma_create_jetty_grp_fill_spec_out, CREATE_JETTY_GRP_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_DESTROY_JETTY_GRP] = { + uburma_delete_jetty_grp_fill_spec_in, DELETE_JETTY_GRP_IN_NUM, + uburma_delete_jetty_grp_fill_spec_out, DELETE_JETTY_GRP_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_USER_CTL] = { + uburma_user_ctl_fill_spec_in, USER_CTL_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_GET_EID_LIST] = { + uburma_get_eid_list_fill_spec_in, GET_EID_LIST_IN_NUM, + uburma_get_eid_list_fill_spec_out, GET_EID_LIST_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_GET_NETADDR_LIST] = { + uburma_get_net_addr_list_fill_spec_in, GET_NET_ADDR_LIST_IN_NUM, + uburma_get_net_addr_list_fill_spec_out, GET_NET_ADDR_LIST_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_MODIFY_TP] = { + uburma_modify_tp_fill_spec_in, MODIFY_TP_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_QUERY_DEV_ATTR] = { + uburma_query_device_fill_spec_in, QUERY_DEVICE_IN_NUM, + uburma_query_device_fill_spec_out, QUERY_DEVICE_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_IMPORT_JETTY_ASYNC] = { + uburma_import_jetty_async_fill_spec_in, IMPORT_JETTY_ASYNC_IN_NUM, + uburma_import_jetty_async_fill_spec_out, IMPORT_JETTY_ASYNC_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_UNIMPORT_JETTY_ASYNC] = { + uburma_unimport_jetty_async_fill_spec_in, UNIMPORT_JETTY_ASYNC_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_BIND_JETTY_ASYNC] = { + uburma_bind_jetty_async_fill_spec_in, BIND_JETTY_ASYNC_IN_NUM, + uburma_bind_jetty_async_fill_spec_out, BIND_JETTY_ASYNC_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_UNBIND_JETTY_ASYNC] = { + uburma_unbind_jetty_async_fill_spec_in, UNBIND_JETTY_ASYNC_IN_NUM, + NULL, 0, + }, + [UBURMA_CMD_CREATE_NOTIFIER] = { + NULL, 0, + uburma_create_notifier_fill_spec_out, CREATE_NOTIFIER_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_GET_TP_LIST] = { + 
uburma_get_tp_list_fill_spec_in, GET_TP_LIST_IN_NUM, + uburma_get_tp_list_fill_spec_out, GET_TP_LIST_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_IMPORT_JETTY_EX] = { + uburma_import_jetty_ex_fill_spec_in, IMPORT_JETTY_EX_IN_NUM, + uburma_import_jetty_ex_fill_spec_out, IMPORT_JETTY_EX_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_IMPORT_JFR_EX] = { + uburma_import_jfr_ex_fill_spec_in, IMPORT_JFR_EX_IN_NUM, + uburma_import_jfr_ex_fill_spec_out, IMPORT_JFR_EX_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_BIND_JETTY_EX] = { + uburma_bind_jetty_ex_fill_spec_in, BIND_JETTY_EX_IN_NUM, + uburma_bind_jetty_ex_fill_spec_out, BIND_JETTY_EX_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_DELETE_JFS_BATCH] = { + uburma_delete_jfs_batch_fill_spec_in, DELETE_JFS_BATCH_IN_NUM, + uburma_delete_jfs_batch_fill_spec_out, DELETE_JFS_BATCH_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_DELETE_JFR_BATCH] = { + uburma_delete_jfr_batch_fill_spec_in, DELETE_JFR_BATCH_IN_NUM, + uburma_delete_jfr_batch_fill_spec_out, DELETE_JFR_BATCH_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_DELETE_JFC_BATCH] = { + uburma_delete_jfc_batch_fill_spec_in, DELETE_JFC_BATCH_IN_NUM, + uburma_delete_jfc_batch_fill_spec_out, DELETE_JFC_BATCH_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_DELETE_JETTY_BATCH] = { + uburma_delete_jetty_batch_fill_spec_in, DELETE_JETTY_BATCH_IN_NUM, + uburma_delete_jetty_batch_fill_spec_out, DELETE_JETTY_BATCH_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_SET_TP_ATTR] = { + uburma_set_tp_attr_fill_spec_in, SET_TP_ATTR_IN_NUM, + uburma_set_tp_attr_fill_spec_out, SET_TP_ATTR_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_GET_TP_ATTR] = { + uburma_get_tp_attr_fill_spec_in, GET_TP_ATTR_IN_NUM, + uburma_get_tp_attr_fill_spec_out, GET_TP_ATTR_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_CMD_EXCHANGE_TP_INFO] = { + uburma_exchange_tp_info_fill_spec_in, EXCHANGE_TP_INFO_IN_NUM, + uburma_exchange_tp_info_fill_spec_out, EXCHANGE_TP_INFO_OUT_NUM - + UBURMA_CMD_OUT_TYPE_INIT, + }, +}; + +static struct uburma_cmd_attr * +uburma_create_tlv_attr(struct uburma_cmd_hdr *hdr, + uint32_t *attr_size) +{ + struct uburma_cmd_attr *attr; + int ret; + + if (hdr->args_len % sizeof(struct uburma_cmd_attr) != 0 || + hdr->args_len >= UBURMA_CMD_TLV_MAX_LEN) { + uburma_log_err("Invalid args_len: %u.\n", + hdr->args_len); + return NULL; + } + attr = kzalloc(hdr->args_len, GFP_KERNEL); + if (attr == NULL) + return NULL; + + ret = uburma_copy_from_user( + attr, (void __user *)(uintptr_t)hdr->args_addr, + hdr->args_len); + if (ret != 0) { + kfree(attr); + return NULL; + } + *attr_size = hdr->args_len / sizeof(struct uburma_cmd_attr); + return attr; +} + +static int uburma_cmd_tlv_parse_type(struct uburma_cmd_spec *spec, + struct uburma_cmd_attr *attr) +{ + uintptr_t ptr_src, ptr_dst; + uint32_t i; + int ret; + + /* length of uburma spec and from uvs should be strictly checked */ + /* as length of uvs ioctl attr should be strictly equal to length of uburma */ + if (spec->field_size != attr->field_size || + spec->attr_data.bs.el_num != attr->attr_data.bs.el_num) { + uburma_log_err( + "Invalid attr, spec/attr, field_size: %u/%u, el_num: %u/%u, type: %u.\n", + spec->field_size, attr->field_size, + spec->attr_data.bs.el_num, + attr->attr_data.bs.el_num, spec->type); + return -EINVAL; + } + + for (i = 0; i < spec->attr_data.bs.el_num; i++) { + ptr_dst = + (spec->data) + i * spec->attr_data.bs.el_size; + ptr_src = + (attr->data) + i * 
attr->attr_data.bs.el_size; + ret = uburma_copy_from_user((void *)ptr_dst, + (void __user *)ptr_src, + spec->field_size); + if (ret != 0) + return ret; + } + + return ret; +} + +static int uburma_cmd_tlv_parse(struct uburma_cmd_spec *spec, + uint32_t spec_size, + struct uburma_cmd_attr *attr, + uint32_t attr_size) +{ + uint32_t spec_idx, attr_idx; + bool match; + int ret; + + /* spec type of this range is only in type */ + for (spec_idx = 0; spec_idx < spec_size; spec_idx++) { + match = false; + for (attr_idx = 0; attr_idx < attr_size; attr_idx++) { + if (spec[spec_idx].type == + attr[attr_idx].type) { + ret = uburma_cmd_tlv_parse_type( + &spec[spec_idx], + &attr[attr_idx]); + if (ret != 0) + return ret; + match = true; + break; + } + } + if (!match && + ((spec[spec_idx].flag.bs.mandatory) != 0)) { + uburma_log_err( + "Failed to match mandatory in type: %u.\n", + spec[spec_idx].type); + return -1; + } + } + + return 0; +} + +static int uburma_cmd_tlv_append_type(struct uburma_cmd_spec *spec, + struct uburma_cmd_attr *attr) +{ + uintptr_t ptr_src, ptr_dst; + uint32_t i; + int ret; + + /* length of uburma spec and from uvs should be strictly checked */ + /* as length of uvs ioctl attr should be strictly equal to length of uburma */ + if (spec->field_size != attr->field_size || + spec->attr_data.bs.el_num > attr->attr_data.bs.el_num) { + uburma_log_err( + "Invalid attr, spec/attr, field_size: %u/%u, array_size: %u/%u, type: %u.\n", + spec->field_size, attr->field_size, + spec->attr_data.bs.el_num, + attr->attr_data.bs.el_num, spec->type); + return -EINVAL; + } + + for (i = 0; i < spec->attr_data.bs.el_num; i++) { + ptr_src = + (spec->data) + i * spec->attr_data.bs.el_size; + ptr_dst = + (attr->data) + i * attr->attr_data.bs.el_size; + ret = uburma_copy_to_user((void __user *)ptr_dst, + (void *)ptr_src, + spec->field_size); + if (ret != 0) + return ret; + } + + return ret; +} + +static int uburma_cmd_tlv_append(struct uburma_cmd_spec *spec, + uint32_t spec_size, + struct uburma_cmd_attr *attr, + uint32_t attr_size) +{ + uint32_t spec_idx, attr_idx; + bool match; + int ret; + + for (spec_idx = 0; spec_idx < spec_size; spec_idx++) { + match = false; + for (attr_idx = 0; attr_idx < attr_size; attr_idx++) { + if (spec[spec_idx].type == + attr[attr_idx].type) { + ret = uburma_cmd_tlv_append_type( + &spec[spec_idx], + &attr[attr_idx]); + if (ret != 0) + return ret; + match = true; + break; + } + } + if (!match && spec[spec_idx].flag.bs.mandatory) { + uburma_log_err( + "Failed to match mandatory out type: %u.\n", + spec[spec_idx].type); + return -1; + } + } + + return 0; +} + +int uburma_tlv_parse(struct uburma_cmd_hdr *hdr, void *arg) +{ + struct uburma_cmd_spec *spec = NULL; + struct uburma_cmd_attr *attr = NULL; + uint32_t attr_size, spec_size; + int ret; + + /* Command of hdr is valid, no need to check it */ + if (g_tlv_handler[hdr->command].fill_spec_in == NULL) { + uburma_log_err("Invalid command: %u.\n", + hdr->command); + return -EINVAL; + } + + spec_size = g_tlv_handler[hdr->command].spec_in_len; + spec = kcalloc(spec_size, sizeof(struct uburma_cmd_spec), + GFP_KERNEL); + if (spec == NULL) + return -ENOMEM; + + g_tlv_handler[hdr->command].fill_spec_in(arg, spec); + + attr = uburma_create_tlv_attr(hdr, &attr_size); + if (attr == NULL) { + ret = -ENOMEM; + goto free_spec; + } + + ret = uburma_cmd_tlv_parse(spec, spec_size, attr, attr_size); + + kfree(attr); +free_spec: + kfree(spec); + return ret; +} + +int uburma_tlv_append(struct uburma_cmd_hdr *hdr, void *arg) +{ + struct uburma_cmd_spec 
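uburma_tlv_parse() above and uburma_tlv_append() below are the two halves of every TLV ioctl: parse copies the caller's attribute array into the kernel arg structure before the command executes, and append copies the out fields (plus udata) back afterwards through the same array. Note that uburma_create_tlv_attr() only accepts an args_len that is a whole multiple of sizeof(struct uburma_cmd_attr), 16 bytes on the usual ABIs, and smaller than UBURMA_CMD_TLV_MAX_LEN. The real dispatch presumably lives in uburma_cmd.c and is not part of this hunk; the function below is only a hypothetical caller showing the intended parse, execute, append sequence, with the handler name and the "execute" step as placeholders.

/*
 * Hypothetical caller, for illustration only; the actual ioctl dispatch in
 * uburma_cmd.c may differ. Assumes hdr->command == UBURMA_CMD_CREATE_CTX.
 */
static int uburma_tlv_cmd_sketch(struct uburma_cmd_hdr *hdr)
{
	struct uburma_cmd_create_ctx arg = { 0 };
	int ret;

	/* 1. Validate and copy the user-supplied in attributes. */
	ret = uburma_tlv_parse(hdr, &arg);
	if (ret != 0)
		return ret;

	/* 2. Execute the command using arg.in (omitted here). */

	/* 3. Write arg.out and the udata back through the user's attr array. */
	return uburma_tlv_append(hdr, &arg);
}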
*spec = NULL; + struct uburma_cmd_attr *attr = NULL; + uint32_t attr_size, spec_size; + int ret; + + /* Command of hdr is valid, no need to check it */ + if (g_tlv_handler[hdr->command].fill_spec_out == NULL) { + uburma_log_err("Invalid command: %u.\n", + hdr->command); + return -EINVAL; + } + + spec_size = g_tlv_handler[hdr->command].spec_out_len; + spec = kcalloc(spec_size, sizeof(struct uburma_cmd_spec), + GFP_KERNEL); + if (spec == NULL) + return -ENOMEM; + + g_tlv_handler[hdr->command].fill_spec_out(arg, spec); + + attr = uburma_create_tlv_attr(hdr, &attr_size); + if (attr == NULL) { + ret = -ENOMEM; + goto free_spec; + } + + ret = uburma_cmd_tlv_append(spec, spec_size, attr, attr_size); + + kfree(attr); +free_spec: + kfree(spec); + return ret; +} + +static void +uburma_wait_jfce_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_jfce_wait *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, JFCE_WAIT_IN_MAX_EVENT_CNT, arg->in.max_event_cnt); + SPEC(s++, JFCE_WAIT_IN_TIME_OUT, arg->in.time_out); +} + +static void +uburma_wait_jfce_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_jfce_wait *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, JFCE_WAIT_OUT_EVENT_CNT, arg->out.event_cnt); + SPEC(s++, JFCE_WAIT_OUT_EVENT_DATA, arg->out.event_data); +} + +static void +uburma_get_async_event_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_async_event *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, GET_ASYNC_EVENT_OUT_EVENT_TYPE, arg->event_type); + SPEC(s++, GET_ASYNC_EVENT_OUT_EVENT_DATA, arg->event_data); +} + +static void +uburma_wait_notify_fill_spec_in(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_wait_notify *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, WAIT_NOTIFY_IN_CNT, arg->in.cnt); + SPEC(s++, WAIT_NOTIFY_IN_TIMEOUT, arg->in.timeout); +} + +static void +uburma_wait_notify_fill_spec_out(void *arg_addr, + struct uburma_cmd_spec *spec) +{ + struct uburma_cmd_wait_notify *arg = arg_addr; + struct uburma_cmd_spec *s = spec; + + SPEC(s++, WAIT_NOTIFY_OUT_CNT, arg->out.cnt); + SPEC(s++, WAIT_NOTIFY_OUT_NOTIFY, arg->out.notify); +} + +static struct uburma_tlv_handler g_event_tlv_handler[] = { + [0] = {0}, + [UBURMA_EVENT_CMD_WAIT_JFCE] = { + uburma_wait_jfce_fill_spec_in, JFCE_WAIT_IN_NUM, + uburma_wait_jfce_fill_spec_out, JFCE_WAIT_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_EVENT_CMD_GET_ASYNC_EVENT] = { + NULL, 0, + uburma_get_async_event_fill_spec_out, + GET_ASYNC_EVENT_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + }, + [UBURMA_EVENT_CMD_WAIT_NOTIFY] = { + uburma_wait_notify_fill_spec_in, WAIT_NOTIFY_IN_NUM, + uburma_wait_notify_fill_spec_out, WAIT_NOTIFY_OUT_NUM - UBURMA_CMD_OUT_TYPE_INIT, + } +}; + +int uburma_event_tlv_parse(struct uburma_cmd_hdr *hdr, void *arg) +{ + struct uburma_cmd_spec *spec = NULL; + struct uburma_cmd_attr *attr = NULL; + uint32_t attr_size, spec_size; + int ret; + + /* Command of hdr is valid, no need to check it */ + if (g_event_tlv_handler[hdr->command].fill_spec_in == NULL) { + uburma_log_err("Invalid command: %u.\n", + hdr->command); + return -EINVAL; + } + + spec_size = g_event_tlv_handler[hdr->command].spec_in_len; + spec = kcalloc(spec_size, sizeof(struct uburma_cmd_spec), + GFP_KERNEL); + if (spec == NULL) + return -ENOMEM; + + g_event_tlv_handler[hdr->command].fill_spec_in(arg, spec); + + attr = uburma_create_tlv_attr(hdr, &attr_size); + if (attr == 
NULL) { + ret = -ENOMEM; + goto free_spec; + } + + ret = uburma_cmd_tlv_parse(spec, spec_size, attr, attr_size); + + kfree(attr); +free_spec: + kfree(spec); + return ret; +} + +int uburma_event_tlv_append(struct uburma_cmd_hdr *hdr, void *arg) +{ + struct uburma_cmd_spec *spec = NULL; + struct uburma_cmd_attr *attr = NULL; + uint32_t attr_size, spec_size; + int ret; + + /* Command of hdr is valid, no need to check it */ + if (g_event_tlv_handler[hdr->command].fill_spec_out == NULL) { + uburma_log_err("Invalid command: %u.\n", + hdr->command); + return -EINVAL; + } + + spec_size = g_event_tlv_handler[hdr->command].spec_out_len; + spec = kcalloc(spec_size, sizeof(struct uburma_cmd_spec), + GFP_KERNEL); + if (spec == NULL) + return -ENOMEM; + + g_event_tlv_handler[hdr->command].fill_spec_out(arg, spec); + + attr = uburma_create_tlv_attr(hdr, &attr_size); + if (attr == NULL) { + ret = -ENOMEM; + goto free_spec; + } + + ret = uburma_cmd_tlv_append(spec, spec_size, attr, attr_size); + + kfree(attr); +free_spec: + kfree(spec); + return ret; +} diff --git a/drivers/ub/urma/uburma/uburma_cmd_tlv.h b/drivers/ub/urma/uburma/uburma_cmd_tlv.h new file mode 100644 index 000000000000..e042cb26c89a --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_cmd_tlv.h @@ -0,0 +1,949 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: uburma cmd tlv parse header, uburma cmd struct consists of + * type/length/value, ioctl operations are copyed and parsed by tlv form + * Author: Wang Hang + * Create: 2024-08-27 + * Note: + * History: 2024-08-27: create file + */ + +#ifndef UBURMA_CMD_TLV_H +#define UBURMA_CMD_TLV_H + +#include + +#include "uburma_cmd.h" + +#define UBURMA_CMD_OUT_TYPE_INIT 0x80 + +enum uburma_event_cmd { + UBURMA_EVENT_CMD_WAIT_JFCE = 1, + UBURMA_EVENT_CMD_GET_ASYNC_EVENT, + UBURMA_EVENT_CMD_WAIT_NOTIFY, + UBURMA_EVENT_CMD_MAX, +}; + +struct uburma_cmd_attr { + uint8_t type; /* See enum uburma_cmd_xxx_type */ + uint8_t flag; + uint16_t field_size; + union { + struct { + uint32_t el_num : 12; /* Array element number if field is in an array */ + uint32_t el_size : 12; /* Array element size if field is in an array */ + uint32_t reserved : 8; + } bs; + uint32_t value; + } attr_data; + uint64_t data; +}; + +struct uburma_cmd_spec { + uint8_t type; /* See uburma_cmd_xxx_type_t */ + union { + struct { + uint8_t mandatory : 1; + } bs; + uint8_t value; + } flag; + uint16_t field_size; + union { + struct { + uint32_t el_num : 12; /* Array element number if field is in an array */ + uint32_t el_size : 12; /* Array element size if field is in an array */ + uint32_t reserved : 8; + } bs; + uint32_t value; + } attr_data; + uint64_t data; +}; + +/* See struct uburma_cmd_create_ctx, in/out type should be continuous */ +enum uburma_cmd_create_ctx_type { + /* In type */ + CREATE_CTX_IN_EID, + CREATE_CTX_IN_EID_INDEX, + CREATE_CTX_IN_UDATA, /* Consider udata as an ordinary member of in specs */ + CREATE_CTX_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + CREATE_CTX_OUT_ASYNC_FD = UBURMA_CMD_OUT_TYPE_INIT, + CREATE_CTX_OUT_UDATA, /* Consider udata as an ordinary member of out specs */ + CREATE_CTX_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_alloc_token_id, in/out type should be continuous */ +enum uburma_cmd_alloc_token_id_type { + /* In type */ + ALLOC_TOKEN_ID_IN_UDATA, /* Consider udata as an ordinary member of in specs */ + ALLOC_TOKEN_ID_IN_FLAG, /* For multi seg per token id stand */ + ALLOC_TOKEN_ID_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + ALLOC_TOKEN_ID_OUT_TOKEN_ID = UBURMA_CMD_OUT_TYPE_INIT, + ALLOC_TOKEN_ID_OUT_HANDLE, + ALLOC_TOKEN_ID_OUT_UDATA, /* Consider udata as an ordinary member of out specs */ + ALLOC_TOKEN_ID_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_free_token_id */ +/* udata is not required in out/in type */ +enum uburma_cmd_free_token_id_type { + /* In type */ + FREE_TOKEN_ID_IN_HANDLE, + FREE_TOKEN_ID_IN_TOKEN_ID, + FREE_TOKEN_ID_IN_NUM /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_register_seg, in/out type should be continuous */ +enum uburma_cmd_register_seg_type { + /* In type */ + REGISTER_SEG_IN_VA, + REGISTER_SEG_IN_LEN, + REGISTER_SEG_IN_TOKEN_ID, + REGISTER_SEG_IN_TOKEN_ID_HANDLE, + REGISTER_SEG_IN_TOKEN, + REGISTER_SEG_IN_FLAG, + REGISTER_SEG_IN_UDATA, /* Consider udata as an ordinary member of in specs */ + REGISTER_SEG_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + REGISTER_SEG_OUT_TOKEN_ID = UBURMA_CMD_OUT_TYPE_INIT, + REGISTER_SEG_OUT_HANDLE, + REGISTER_SEG_OUT_UDATA, /* Consider udata as an ordinary member of out specs */ + REGISTER_SEG_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_unregister_seg, in/out type should be continuous */ +enum 
uburma_cmd_unregister_seg_type { + /* In type */ + UNREGISTER_SEG_IN_HANDLE, + UNREGISTER_SEG_IN_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_import_seg, in/out type should be continuous */ +enum uburma_cmd_import_seg_type { + /* In type */ + IMPORT_SEG_IN_EID, + IMPORT_SEG_IN_VA, + IMPORT_SEG_IN_LEN, + IMPORT_SEG_IN_FLAG, + IMPORT_SEG_IN_TOKEN, + IMPORT_SEG_IN_TOKEN_ID, + IMPORT_SEG_IN_MVA, + IMPORT_SEG_IN_UDATA, /* Consider udata as an ordinary member of in specs */ + IMPORT_SEG_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + IMPORT_SEG_OUT_HANDLE = UBURMA_CMD_OUT_TYPE_INIT, + IMPORT_SEG_OUT_UDATA, /* Consider udata as an ordinary member of out specs */ + IMPORT_SEG_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_unimport_seg */ +enum uburma_cmd_unimport_seg_type { + /* In type */ + UNIMPORT_SEG_IN_HANDLE, + UNIMPORT_SEG_IN_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_create_jfs, in/out type should be continuous */ +enum uburma_cmd_create_jfs_type { + /* In type */ + CREATE_JFS_IN_DEPTH, + CREATE_JFS_IN_FLAG, + CREATE_JFS_IN_TRANS_MODE, + CREATE_JFS_IN_PRIORITY, + CREATE_JFS_IN_MAX_SGE, + CREATE_JFS_IN_MAX_RSGE, + CREATE_JFS_IN_MAX_INLINE_DATA, + CREATE_JFS_IN_RETRY_CNT, + CREATE_JFS_IN_RNR_RETRY, + CREATE_JFS_IN_ERR_TIMEOUT, + CREATE_JFS_IN_JFC_ID, + CREATE_JFS_IN_JFC_HANDLE, + CREATE_JFS_IN_URMA_JFS, + CREATE_JFS_IN_UDATA, /* Consider udata as an ordinary member of in specs */ + CREATE_JFS_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + CREATE_JFS_OUT_ID = UBURMA_CMD_OUT_TYPE_INIT, + CREATE_JFS_OUT_DEPTH, + CREATE_JFS_OUT_MAX_SGE, + CREATE_JFS_OUT_MAX_RSGE, + CREATE_JFS_OUT_MAX_INLINE_DATA, + CREATE_JFS_OUT_HANDLE, + CREATE_JFS_OUT_UDATA, /* Consider udata as an ordinary member of out specs */ + CREATE_JFS_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_modify_jfs, in/out type should be continuous */ +enum uburma_cmd_modify_jfs_type { + /* In type */ + MODIFY_JFS_IN_HANDLE, + MODIFY_JFS_IN_MASK, + MODIFY_JFS_IN_STATE, + MODIFY_JFS_IN_UDATA, /* Consider udata as an ordinary member of in specs */ + MODIFY_JFS_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + /* Consider udata as an ordinary member of out specs */ + MODIFY_JFS_OUT_UDATA = UBURMA_CMD_OUT_TYPE_INIT, + MODIFY_JFS_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_query_jfs, in/out type should be continuous */ +enum uburma_cmd_query_jfs_type { + /* In type */ + QUERY_JFS_IN_HANDLE, + QUERY_JFS_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + QUERY_JFS_OUT_DEPTH = UBURMA_CMD_OUT_TYPE_INIT, + QUERY_JFS_OUT_FLAG, + QUERY_JFS_OUT_TRANS_MODE, + QUERY_JFS_OUT_PRIORITY, + QUERY_JFS_OUT_MAX_SGE, + QUERY_JFS_OUT_MAX_RSGE, + QUERY_JFS_OUT_MAX_INLINE_DATA, + QUERY_JFS_OUT_RETRY_CNT, + QUERY_JFS_OUT_RNR_RETRY, + QUERY_JFS_OUT_ERR_TIMEOUT, + QUERY_JFS_OUT_STATE, + QUERY_JFS_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_delete_jfs, in/out type should be continuous */ +enum uburma_cmd_delete_jfs_type { + /* In type */ + DELETE_JFS_IN_HANDLE, + DELETE_JFS_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + DELETE_JFS_OUT_ASYNC_EVENTS_REPORTED = UBURMA_CMD_OUT_TYPE_INIT, + DELETE_JFS_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_delete_jfs_batch, in/out type should be continuous */ +enum 
uburma_cmd_delete_jfs_batch_type { + /* In type */ + DELETE_JFS_BATCH_IN_JFS_COUNT, + DELETE_JFS_BATCH_IN_JFS_PTR, + DELETE_JFS_BATCH_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + DELETE_JFS_BATCH_OUT_ASYNC_EVENTS_REPORTED = UBURMA_CMD_OUT_TYPE_INIT, + DELETE_JFS_BATCH_OUT_BAD_JFS_INDEX, + DELETE_JFS_BATCH_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_create_jfr, in/out type should be continuous */ +enum uburma_cmd_create_jfr_type { + /* In type */ + CREATE_JFR_IN_DEPTH, + CREATE_JFR_IN_FLAG, + CREATE_JFR_IN_TRANS_MODE, + CREATE_JFR_IN_MAX_SGE, + CREATE_JFR_IN_MIN_RNR_TIMER, + CREATE_JFR_IN_JFC_ID, + CREATE_JFR_IN_JFC_HANDLE, + CREATE_JFR_IN_TOKEN, + CREATE_JFR_IN_ID, + CREATE_JFR_IN_URMA_JFR, + CREATE_JFR_IN_UDATA, /* Consider udata as an ordinary member of in specs */ + CREATE_JFR_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + CREATE_JFR_OUT_ID = UBURMA_CMD_OUT_TYPE_INIT, + CREATE_JFR_OUT_DEPTH, + CREATE_JFR_OUT_MAX_SGE, + CREATE_JFR_OUT_HANDLE, + CREATE_JFR_OUT_UDATA, /* Consider udata as an ordinary member of out specs */ + CREATE_JFR_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_modify_jfr, in/out type should be continuous */ +enum uburma_cmd_modify_jfr_type { + /* In type */ + MODIFY_JFR_IN_HANDLE, + MODIFY_JFR_IN_MASK, + MODIFY_JFR_IN_RX_THRESHOLD, + MODIFY_JFR_IN_STATE, + MODIFY_JFR_IN_UDATA, /* Consider udata as an ordinary member of in specs */ + MODIFY_JFR_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + /* Consider udata as an ordinary member of out specs */ + MODIFY_JFR_OUT_UDATA = UBURMA_CMD_OUT_TYPE_INIT, + MODIFY_JFR_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_query_jfr, in/out type should be continuous */ +enum uburma_cmd_query_jfr_type { + /* In type */ + QUERY_JFR_IN_HANDLE, + QUERY_JFR_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + QUERY_JFR_OUT_DEPTH = UBURMA_CMD_OUT_TYPE_INIT, + QUERY_JFR_OUT_FLAG, + QUERY_JFR_OUT_TRANS_MODE, + QUERY_JFR_OUT_MAX_SGE, + QUERY_JFR_OUT_MIN_RNR_TIMER, + QUERY_JFR_OUT_TOKEN, + QUERY_JFR_OUT_ID, + QUERY_JFR_OUT_RX_THRESHOLD, + QUERY_JFR_OUT_STATE, + QUERY_JFR_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_delete_jfr, in/out type should be continuous */ +enum uburma_cmd_delete_jfr_type { + /* In type */ + DELETE_JFR_IN_HANDLE, + DELETE_JFR_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + DELETE_JFR_OUT_ASYNC_EVENTS_REPORTED = UBURMA_CMD_OUT_TYPE_INIT, + DELETE_JFR_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_delete_jfr_batch, in/out type should be continuous */ +enum uburma_cmd_delete_jfr_batch_type { + /* In type */ + DELETE_JFR_BATCH_IN_JFR_COUNT, + DELETE_JFR_BATCH_IN_JFR_PTR, + DELETE_JFR_BATCH_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + DELETE_JFR_BATCH_OUT_ASYNC_EVENTS_REPORTED = UBURMA_CMD_OUT_TYPE_INIT, + DELETE_JFR_BATCH_OUT_BAD_JFR_INDEX, + DELETE_JFR_BATCH_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_create_jfc, in/out type should be continuous */ +enum uburma_cmd_create_jfc_type { + /* In type */ + CREATE_JFC_IN_DEPTH, + CREATE_JFC_IN_FLAG, + CREATE_JFC_IN_JFCE_FD, + CREATE_JFC_IN_URMA_JFC, + CREATE_JFC_IN_CEQN, + CREATE_JFC_IN_UDATA, /* Consider udata as an ordinary member of in specs */ + CREATE_JFC_IN_NUM, /* Only for calculating number of types */ + /* Out type */ 
+ CREATE_JFC_OUT_ID = UBURMA_CMD_OUT_TYPE_INIT, + CREATE_JFC_OUT_DEPTH, + CREATE_JFC_OUT_HANDLE, + CREATE_JFC_OUT_UDATA, /* Consider udata as an ordinary member of out specs */ + CREATE_JFC_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_modify_jfc, in/out type should be continuous */ +enum uburma_cmd_modify_jfc_type { + /* In type */ + MODIFY_JFC_IN_HANDLE, + MODIFY_JFC_IN_MASK, + MODIFY_JFC_IN_MODERATE_COUNT, + MODIFY_JFC_IN_MODERATE_PERIOD, + MODIFY_JFC_IN_UDATA, /* Consider udata as an ordinary member of in specs */ + MODIFY_JFC_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + /* Consider udata as an ordinary member of out specs */ + MODIFY_JFC_OUT_UDATA = UBURMA_CMD_OUT_TYPE_INIT, + MODIFY_JFC_OUT_NUM /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_delete_jfc, in/out type should be continuous */ +enum uburma_cmd_delete_jfc_type { + /* In type */ + DELETE_JFC_IN_HANDLE, + DELETE_JFC_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + DELETE_JFC_OUT_COMP_EVENTS_REPORTED = UBURMA_CMD_OUT_TYPE_INIT, + DELETE_JFC_OUT_ASYNC_EVENTS_REPORTED, + DELETE_JFC_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_delete_jfc_batch, in/out type should be continuous */ +enum uburma_cmd_delete_jfc_batch_type { + /* In type */ + DELETE_JFC_BATCH_IN_JFC_COUNT, + DELETE_JFC_BATCH_IN_JFC_PTR, + DELETE_JFC_BATCH_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + DELETE_JFC_BATCH_OUT_COMP_EVENTS_REPORTED = UBURMA_CMD_OUT_TYPE_INIT, + DELETE_JFC_BATCH_OUT_ASYNC_EVENTS_REPORTED, + DELETE_JFC_BATCH_OUT_BAD_JFC_INDEX, + DELETE_JFC_BATCH_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_create_jfce, in/out type should be continuous */ +enum uburma_cmd_create_jfce_type { + /* Out type */ + CREATE_JFCE_OUT_FD = UBURMA_CMD_OUT_TYPE_INIT, + CREATE_JFCE_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_import_jfr, in/out type should be continuous */ +enum uburma_cmd_import_jfr_type { + /* In type */ + IMPORT_JFR_IN_EID, + IMPORT_JFR_IN_ID, + IMPORT_JFR_IN_FLAG, + IMPORT_JFR_IN_TOKEN, + IMPORT_JFR_IN_TRANS_MODE, + IMPORT_JFR_IN_UDATA, /* Consider udata as an ordinary member of in specs */ + IMPORT_JFR_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + IMPORT_JFR_OUT_TPN = UBURMA_CMD_OUT_TYPE_INIT, + IMPORT_JFR_OUT_HANDLE, + IMPORT_JFR_OUT_UDATA, /* Consider udata as an ordinary member of out specs */ + IMPORT_JFR_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_import_jfr_ex, in/out type should be continuous */ +enum uburma_cmd_import_jfr_ex_type { + /* In type */ + IMPORT_JFR_EX_IN_EID, + IMPORT_JFR_EX_IN_ID, + IMPORT_JFR_EX_IN_FLAG, + IMPORT_JFR_EX_IN_TOKEN, + IMPORT_JFR_EX_IN_TRANS_MODE, + IMPORT_JFR_EX_IN_TP_HANDLE, + IMPORT_JFR_EX_IN_PEER_TP_HANDLE, + IMPORT_JFR_EX_IN_TAG, + IMPORT_JFR_EX_IN_TX_PSN, + IMPORT_JFR_EX_IN_RX_PSN, + IMPORT_JFR_EX_IN_UDATA, /* Consider udata as an ordinary member of in specs */ + IMPORT_JFR_EX_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + IMPORT_JFR_EX_OUT_TPN = UBURMA_CMD_OUT_TYPE_INIT, + IMPORT_JFR_EX_OUT_HANDLE, + IMPORT_JFR_EX_OUT_UDATA, /* Consider udata as an ordinary member of out specs */ + IMPORT_JFR_EX_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_unimport_jfr, in/out type should be continuous */ +enum uburma_cmd_unimport_jfr_type { + /* In type 
*/ + UNIMPORT_JFR_IN_HANDLE, + UNIMPORT_JFR_IN_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_create_jetty, in/out type should be continuous */ +enum uburma_cmd_create_jetty_type { + /* In type */ + CREATE_JETTY_IN_ID, + CREATE_JETTY_IN_JETTY_FLAG, + CREATE_JETTY_IN_JFS_DEPTH, + CREATE_JETTY_IN_JFS_FLAG, + CREATE_JETTY_IN_TRANS_MODE, + CREATE_JETTY_IN_PRIORITY, + CREATE_JETTY_IN_MAX_SEND_SGE, + CREATE_JETTY_IN_MAX_SEND_RSGE, + CREATE_JETTY_IN_MAX_INLINE_DATA, + CREATE_JETTY_IN_RNR_RETRY, + CREATE_JETTY_IN_ERR_TIMEOUT, + CREATE_JETTY_IN_SEND_JFC_ID, + CREATE_JETTY_IN_SEND_JFC_HANDLE, + CREATE_JETTY_IN_JFR_DEPTH, + CREATE_JETTY_IN_JFR_FLAG, + CREATE_JETTY_IN_MAX_RECV_SGE, + CREATE_JETTY_IN_MIN_RNR_TIMER, + CREATE_JETTY_IN_RECV_JFC_ID, + CREATE_JETTY_IN_RECV_JFC_HANDLE, + CREATE_JETTY_IN_TOKEN, + CREATE_JETTY_IN_JFR_ID, + CREATE_JETTY_IN_JFR_HANDLE, + CREATE_JETTY_IN_JETTY_GRP_HANDLE, + CREATE_JETTY_IN_IS_JETTY_GRP, + CREATE_JETTY_IN_URMA_JETTY, + CREATE_JETTY_IN_UDATA, /* Consider udata as an in/out attr */ + CREATE_JETTY_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + CREATE_JETTY_OUT_ID = UBURMA_CMD_OUT_TYPE_INIT, + CREATE_JETTY_OUT_HANDLE, + CREATE_JETTY_OUT_JFS_DEPTH, + CREATE_JETTY_OUT_JFR_DEPTH, + CREATE_JETTY_OUT_MAX_SEND_SGE, + CREATE_JETTY_OUT_MAX_SEND_RSGE, + CREATE_JETTY_OUT_MAX_RECV_SGE, + CREATE_JETTY_OUT_MAX_INLINE_DATA, + CREATE_JETTY_OUT_UDATA, /* Consider udata as an in/out attr */ + CREATE_JETTY_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_modify_jetty, in/out type should be continuous */ +enum uburma_cmd_modify_jetty_type { + /* In type */ + MODIFY_JETTY_IN_HANDLE, + MODIFY_JETTY_IN_MASK, + MODIFY_JETTY_IN_RX_THRESHOLD, + MODIFY_JETTY_IN_STATE, + MODIFY_JETTY_IN_UDATA, /* Consider udata as an in/out attr */ + MODIFY_JETTY_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + MODIFY_JETTY_OUT_UDATA = + UBURMA_CMD_OUT_TYPE_INIT, /* Consider udata as an in/out attr */ + MODIFY_JETTY_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_query_jetty, in/out type should be continuous */ +enum uburma_cmd_query_jetty_type { + /* In type */ + QUERY_JETTY_IN_HANDLE, + QUERY_JETTY_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + QUERY_JETTY_OUT_ID = UBURMA_CMD_OUT_TYPE_INIT, + QUERY_JETTY_OUT_JETTY_FLAG, + QUERY_JETTY_OUT_JFS_DEPTH, + QUERY_JETTY_OUT_JFR_DEPTH, + QUERY_JETTY_OUT_JFS_FLAG, + QUERY_JETTY_OUT_JFR_FLAG, + QUERY_JETTY_OUT_TRANS_MODE, + QUERY_JETTY_OUT_MAX_SEND_SGE, + QUERY_JETTY_OUT_MAX_SEND_RSGE, + QUERY_JETTY_OUT_MAX_RECV_SGE, + QUERY_JETTY_OUT_MAX_INLINE_DATA, + QUERY_JETTY_OUT_PRIORITY, + QUERY_JETTY_OUT_RETRY_CNT, + QUERY_JETTY_OUT_RNR_RETRY, + QUERY_JETTY_OUT_ERR_TIMEOUT, + QUERY_JETTY_OUT_MIN_RNR_TIMER, + QUERY_JETTY_OUT_JFR_ID, + QUERY_JETTY_OUT_TOKEN, + QUERY_JETTY_OUT_RX_THRESHOLD, + QUERY_JETTY_OUT_STATE, + QUERY_JETTY_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_delete_jetty, in/out type should be continuous */ +enum uburma_cmd_delete_jetty_type { + /* In type */ + DELETE_JETTY_IN_HANDLE, + DELETE_JETTY_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + DELETE_JETTY_OUT_ASYNC_EVENTS_REPORTED = UBURMA_CMD_OUT_TYPE_INIT, + DELETE_JETTY_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_delete_jetty_batch, in/out type should be continuous */ +enum uburma_cmd_delete_jetty_batch_type { + /* In type */ + 
DELETE_JETTY_BATCH_IN_JETTY_COUNT, + DELETE_JETTY_BATCH_IN_JETTY_PTR, + DELETE_JETTY_BATCH_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + DELETE_JETTY_BATCH_OUT_ASYNC_EVENTS_REPORTED = UBURMA_CMD_OUT_TYPE_INIT, + DELETE_JETTY_BATCH_OUT_BAD_JETTY_INDEX, + DELETE_JETTY_BATCH_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_import_jetty, in/out type should be continuous */ +enum uburma_cmd_import_jetty_type { + /* In type */ + IMPORT_JETTY_IN_EID, + IMPORT_JETTY_IN_ID, + IMPORT_JETTY_IN_FLAG, + IMPORT_JETTY_IN_TOKEN, + IMPORT_JETTY_IN_TRANS_MODE, + IMPORT_JETTY_IN_POLICY, + IMPORT_JETTY_IN_TYPE, + IMPORT_JETTY_IN_UDATA, /* Consider udata as an in/out attr */ + IMPORT_JETTY_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + IMPORT_JETTY_OUT_TPN = UBURMA_CMD_OUT_TYPE_INIT, + IMPORT_JETTY_OUT_HANDLE, + IMPORT_JETTY_OUT_UDATA, /* Consider udata as an in/out attr */ + IMPORT_JETTY_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_import_jetty_ex, in/out type should be continuous */ +enum uburma_cmd_import_jetty_ex_type { + /* In type */ + IMPORT_JETTY_EX_IN_EID, + IMPORT_JETTY_EX_IN_ID, + IMPORT_JETTY_EX_IN_FLAG, + IMPORT_JETTY_EX_IN_TOKEN, + IMPORT_JETTY_EX_IN_TRANS_MODE, + IMPORT_JETTY_EX_IN_POLICY, + IMPORT_JETTY_EX_IN_TYPE, + IMPORT_JETTY_EX_IN_TP_HANDLE, + IMPORT_JETTY_EX_IN_PEER_TP_HANDLE, + IMPORT_JETTY_EX_IN_TAG, + IMPORT_JETTY_EX_IN_TX_PSN, + IMPORT_JETTY_EX_IN_RX_PSN, + IMPORT_JETTY_EX_IN_UDATA, /* Consider udata as an in/out attr */ + IMPORT_JETTY_EX_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + IMPORT_JETTY_EX_OUT_TPN = UBURMA_CMD_OUT_TYPE_INIT, + IMPORT_JETTY_EX_OUT_HANDLE, + IMPORT_JETTY_EX_OUT_UDATA, /* Consider udata as an in/out attr */ + IMPORT_JETTY_EX_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_unimport_jetty, in/out type should be continuous */ +enum uburma_cmd_unimport_jetty_type { + /* In type */ + UNIMPORT_JETTY_IN_HANDLE, + UNIMPORT_JETTY_IN_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_advise_jetty, in/out type should be continuous */ +enum uburma_cmd_advise_jetty_type { + /* In type */ + ADVISE_JETTY_IN_JETTY_HANDLE, + ADVISE_JETTY_IN_TJETTY_HANDLE, + ADVISE_JETTY_IN_UDATA, /* Consider udata as an in/out attr */ + ADVISE_JETTY_IN_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_unadvise_jetty, in/out type should be continuous */ +enum uburma_cmd_unadvise_jetty_type { + /* In type */ + UNADVISE_JETTY_IN_JETTY_HANDLE, + UNADVISE_JETTY_IN_TJETTY_HANDLE, + UNADVISE_JETTY_IN_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_bind_jetty, in/out type should be continuous */ +enum uburma_cmd_bind_jetty_type { + /* In type */ + BIND_JETTY_IN_JETTY_HANDLE, + BIND_JETTY_IN_TJETTY_HANDLE, + BIND_JETTY_IN_UDATA, /* Consider udata as an in/out attr */ + BIND_JETTY_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + BIND_JETTY_OUT_TPN = UBURMA_CMD_OUT_TYPE_INIT, + BIND_JETTY_OUT_UDATA, /* Consider udata as an in/out attr */ + BIND_JETTY_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_bind_jetty_ex, in/out type should be continuous */ +enum uburma_cmd_bind_jetty_ex_type { + /* In type */ + BIND_JETTY_EX_IN_JETTY_HANDLE, + BIND_JETTY_EX_IN_TJETTY_HANDLE, + BIND_JETTY_EX_IN_TP_HANDLE, + BIND_JETTY_EX_IN_PEER_TP_HANDLE, + BIND_JETTY_EX_IN_TAG, + BIND_JETTY_EX_IN_TX_PSN, + 
BIND_JETTY_EX_IN_RX_PSN, + BIND_JETTY_EX_IN_UDATA, /* Consider udata as an in/out attr */ + BIND_JETTY_EX_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + BIND_JETTY_EX_OUT_TPN = UBURMA_CMD_OUT_TYPE_INIT, + BIND_JETTY_EX_OUT_UDATA, /* Consider udata as an in/out attr */ + BIND_JETTY_EX_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_create_jetty_grp, in/out type should be continuous */ +enum uburma_cmd_create_jetty_grp_type { + /* In type */ + CREATE_JETTY_GRP_IN_NAME, + CREATE_JETTY_GRP_IN_TOKEN, + CREATE_JETTY_GRP_IN_ID, + CREATE_JETTY_GRP_IN_POLICY, + CREATE_JETTY_GRP_IN_FLAG, + CREATE_JETTY_GRP_IN_URMA_JETTY_GRP, + CREATE_JETTY_GRP_IN_UDATA, /* Consider udata as an in/out attr */ + CREATE_JETTY_GRP_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + CREATE_JETTY_GRP_OUT_ID = UBURMA_CMD_OUT_TYPE_INIT, + CREATE_JETTY_GRP_OUT_HANDLE, + CREATE_JETTY_GRP_OUT_UDATA, /* Consider udata as an in/out attr */ + CREATE_JETTY_GRP_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_delete_jetty_grp, in/out type should be continuous */ +enum uburma_cmd_delete_jetty_grp_type { + /* In type */ + DELETE_JETTY_GRP_IN_HANDLE, + DELETE_JETTY_GRP_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + DELETE_JETTY_GRP_OUT_ASYNC_EVENTS_REPORTED = UBURMA_CMD_OUT_TYPE_INIT, + DELETE_JETTY_GRP_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_user_ctl, in/out type should be continuous */ +enum uburma_cmd_user_ctl_type { + /* In type */ + USER_CTL_IN_IN_ADDR, + USER_CTL_IN_IN_LEN, + USER_CTL_IN_OPCODE, + USER_CTL_IN_OUT_ADDR, + USER_CTL_IN_OUT_LEN, + USER_CTL_IN_UDATA, + USER_CTL_IN_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_get_eid_list, in/out type should be continuous */ +enum uburma_cmd_get_eid_list_type { + /* In type */ + GET_EID_LIST_IN_MAX_EID_CNT, + GET_EID_LIST_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + GET_EID_LIST_OUT_EID_CNT = UBURMA_CMD_OUT_TYPE_INIT, + GET_EID_LIST_OUT_EID_LIST, /* This array is considered as a whole */ + GET_EID_LIST_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_get_net_addr_list, in/out type should be continuous */ +enum uburma_cmd_get_net_addr_list_type { + /* In type */ + GET_NET_ADDR_LIST_IN_MAX_NETADDR_CNT, + GET_NET_ADDR_LIST_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + GET_NET_ADDR_LIST_OUT_NETADDR_CNT = UBURMA_CMD_OUT_TYPE_INIT, + GET_NET_ADDR_LIST_OUT_NETADDR_LIST, /* This array is considered as a whole */ + GET_NET_ADDR_LIST_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_modify_tp, in/out type should be continuous */ +enum uburma_cmd_modify_tp_type { + /* In type */ + MODIFY_TP_IN_TPN, + MODIFY_TP_IN_TP_CFG_FLAG, + MODIFY_TP_IN_TP_CFG_TRANS_MODE, + MODIFY_TP_IN_TP_CFG_RETRY_NUM, + MODIFY_TP_IN_TP_CFG_RETRY_FACTOR, + MODIFY_TP_IN_TP_CFG_ACK_TIMEOUT, + MODIFY_TP_IN_TP_CFG_DSCP, + MODIFY_TP_IN_TP_CFG_OOR_CNT, + MODIFY_TP_IN_ATTR_FLAG, + MODIFY_TP_IN_ATTR_PEER_TPN, + MODIFY_TP_IN_ATTR_STATE, + MODIFY_TP_IN_ATTR_TX_PSN, + MODIFY_TP_IN_ATTR_RX_PSN, + MODIFY_TP_IN_ATTR_MTU, + MODIFY_TP_IN_ATTR_CC_PATTERN_IDX, + MODIFY_TP_IN_ATTR_OOS_CNT, + MODIFY_TP_IN_ATTR_LOCAL_NET_ADDR_IDX, + MODIFY_TP_IN_ATTR_PEER_NET_ADDR, + MODIFY_TP_IN_ATTR_DATA_UDP_START, + MODIFY_TP_IN_ATTR_ACK_UDP_START, + MODIFY_TP_IN_ATTR_UDP_RANGE, + MODIFY_TP_IN_ATTR_HOP_LIMIT, + 
MODIFY_TP_IN_ATTR_FLOW_LABEL, + MODIFY_TP_IN_ATTR_PORT_ID, + MODIFY_TP_IN_ATTR_MN, + MODIFY_TP_IN_ATTR_PEER_TRANS_TYPE, + MODIFY_TP_IN_MASK, + MODIFY_TP_IN_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_query_device_attr, in/out type should be continuous */ +enum uburma_cmd_query_device_attr_type { + /* In type */ + QUERY_DEVICE_IN_DEV_NAME, + QUERY_DEVICE_IN_NUM, + /* Out type */ + QUERY_DEVICE_OUT_GUID = UBURMA_CMD_OUT_TYPE_INIT, + QUERY_DEVICE_OUT_DEV_CAP_FEATURE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JFC, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JFS, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JFR, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JETTY, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JETTY_GRP, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JETTY_IN_JETTY_GRP, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JFC_DEPTH, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JFS_DEPTH, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JFR_DEPTH, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JFS_INLINE_LEN, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JFS_SGE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JFS_RSGE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_JFR_SGE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_MSG_SIZE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_READ_SIZE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_WRITE_SIZE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_CAS_SIZE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_SWAP_SIZE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_FETCH_AND_ADD_SIZE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_FETCH_AND_SUB_SIZE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_FETCH_AND_AND_SIZE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_FETCH_AND_OR_SIZE, + QUERY_DEVICE_OUT_DEV_CAP_MAX_FETCH_AND_XOR_SIZE, + QUERY_DEVICE_OUT_DEV_CAP_ATOMIC_FEAT, + QUERY_DEVICE_OUT_DEV_CAP_TRANS_MODE, + QUERY_DEVICE_OUT_DEV_CAP_SUB_TRANS_MODE_CAP, + QUERY_DEVICE_OUT_DEV_CAP_CONGESTION_CTRL_ALG, + QUERY_DEVICE_OUT_DEV_CAP_CEQ_CNT, + QUERY_DEVICE_OUT_DEV_CAP_MAX_TP_IN_TPG, + QUERY_DEVICE_OUT_DEV_CAP_MAX_EID_CNT, + QUERY_DEVICE_OUT_DEV_CAP_PAGE_SIZE_CAP, + QUERY_DEVICE_OUT_DEV_CAP_MAX_OOR_CNT, + QUERY_DEVICE_OUT_DEV_CAP_MN, + QUERY_DEVICE_OUT_DEV_CAP_MAX_NETADDR_CN, + QUERY_DEVICE_OUT_PORT_CNT, + QUERY_DEVICE_OUT_PORT_ATTR_MAX_MTU, + QUERY_DEVICE_OUT_PORT_ATTR_STATE, + QUERY_DEVICE_OUT_PORT_ATTR_ACTIVE_WIDTH, + QUERY_DEVICE_OUT_PORT_ATTR_ACTIVE_SPEED, + QUERY_DEVICE_OUT_PORT_ATTR_ACTIVE_MTU, + QUERY_DEVICE_OUT_RESERVED_JETTY_ID_MIN, + QUERY_DEVICE_OUT_RESERVED_JETTY_ID_MAX, + QUERY_DEVICE_OUT_NUM, +}; + +/* See struct uburma_cmd_import_jetty_async, in/out type should be continuous */ +enum uburma_cmd_import_jetty_async_type { + /* In type */ + IMPORT_JETTY_ASYNC_IN_EID, + IMPORT_JETTY_ASYNC_IN_ID, + IMPORT_JETTY_ASYNC_IN_FLAG, + IMPORT_JETTY_ASYNC_IN_TOKEN, + IMPORT_JETTY_ASYNC_IN_TRANS_MODE, + IMPORT_JETTY_ASYNC_IN_POLICY, + IMPORT_JETTY_ASYNC_IN_TYPE, + IMPORT_JETTY_ASYNC_IN_URMA_TJETTY, + IMPORT_JETTY_ASYNC_IN_USER_CTX, + IMPORT_JETTY_ASYNC_IN_FD, + IMPORT_JETTY_ASYNC_IN_TIMEOUT, + IMPORT_JETTY_ASYNC_IN_UDATA, /* Consider udata as an in/out attr */ + IMPORT_JETTY_ASYNC_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + IMPORT_JETTY_ASYNC_OUT_TPN = UBURMA_CMD_OUT_TYPE_INIT, + IMPORT_JETTY_ASYNC_OUT_HANDLE, + IMPORT_JETTY_ASYNC_OUT_UDATA, /* Consider udata as an in/out attr */ + IMPORT_JETTY_ASYNC_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_unimport_jetty_async, in/out type should be continuous */ +enum uburma_cmd_unimport_jetty_async_type { + /* In type */ + UNIMPORT_JETTY_ASYNC_IN_HANDLE, + UNIMPORT_JETTY_ASYNC_IN_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_bind_jetty_async, in/out type should be continuous */ +enum 
uburma_cmd_bind_jetty_async_type { + /* In type */ + BIND_JETTY_ASYNC_IN_JETTY_HANDLE, + BIND_JETTY_ASYNC_IN_TJETTY_HANDLE, + BIND_JETTY_ASYNC_IN_URMA_TJETTY, + BIND_JETTY_ASYNC_IN_URMA_JETTY, + BIND_JETTY_ASYNC_IN_FD, + BIND_JETTY_ASYNC_IN_USER_CTX, + BIND_JETTY_ASYNC_IN_TIMEOUT, + BIND_JETTY_ASYNC_IN_UDATA, /* Consider udata as an in/out attr */ + BIND_JETTY_ASYNC_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + BIND_JETTY_ASYNC_OUT_TPN = UBURMA_CMD_OUT_TYPE_INIT, + BIND_JETTY_ASYNC_OUT_UDATA, /* Consider udata as an in/out attr */ + BIND_JETTY_ASYNC_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_unbind_jetty_async, in/out type should be continuous */ +enum uburma_cmd_unbind_jetty_async_type { + /* In type */ + UNBIND_JETTY_ASYNC_IN_JETTY_HANDLE, + UNBIND_JETTY_ASYNC_IN_TJETTY_HANDLE, + UNBIND_JETTY_ASYNC_IN_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_create_notifier, in/out type should be continuous */ +enum uburma_cmd_create_notifier_type { + /* Out type */ + CREATE_NOTIFIER_OUT_FD = UBURMA_CMD_OUT_TYPE_INIT, + CREATE_NOTIFIER_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_get_tp_list, in/out type should be continuous */ +enum uburma_cmd_get_tp_list_type { + /* In type */ + GET_TP_LIST_IN_FLAG, + GET_TP_LIST_IN_TRANS_MODE, + GET_TP_LIST_IN_LOCAL_EID, + GET_TP_LIST_IN_PEER_EID, + GET_TP_LIST_IN_TP_CNT, + GET_TP_LIST_IN_UDATA, + GET_TP_LIST_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + GET_TP_LIST_OUT_TP_CNT = UBURMA_CMD_OUT_TYPE_INIT, + GET_TP_LIST_OUT_TP_HANDLE, + GET_TP_LIST_OUT_UDATA, + GET_TP_LIST_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_set_tp_attr, in/out type should be continuous */ +enum uburma_cmd_set_tp_attr_type { + /* In type */ + SET_TP_ATTR_IN_TP_HANLDE, + SET_TP_ATTR_IN_TP_ATTR_CNT, + SET_TP_ATTR_IN_TP_ATTR_BITMAP, + SET_TP_ATTR_IN_TP_ATTR, + SET_TP_ATTR_IN_UDATA, + SET_TP_ATTR_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + SET_TP_ATTR_OUT_UDATA = UBURMA_CMD_OUT_TYPE_INIT, + SET_TP_ATTR_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_get_tp_attr, in/out type should be continuous */ +enum uburma_cmd_get_tp_attr_type { + /* In type */ + GET_TP_ATTR_IN_TP_HANDLE, + GET_TP_ATTR_IN_UDATA, + GET_TP_ATTR_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + GET_TP_ATTR_OUT_TP_ATTR_CNT = UBURMA_CMD_OUT_TYPE_INIT, + GET_TP_ATTR_OUT_TP_ATTR_BITMAP, + GET_TP_ATTR_OUT_TP_ATTR, + GET_TP_ATTR_OUT_UDATA, + GET_TP_ATTR_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_jfce_wait, in/out type should be continuous */ +enum uburma_cmd_jfce_wait_type { + /* In type */ + JFCE_WAIT_IN_MAX_EVENT_CNT, + JFCE_WAIT_IN_TIME_OUT, + JFCE_WAIT_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + JFCE_WAIT_OUT_EVENT_CNT = UBURMA_CMD_OUT_TYPE_INIT, + JFCE_WAIT_OUT_EVENT_DATA, + JFCE_WAIT_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_async_event, in/out type should be continuous */ +enum uburma_cmd_async_event_type { + /* Out type */ + GET_ASYNC_EVENT_OUT_EVENT_TYPE = UBURMA_CMD_OUT_TYPE_INIT, + GET_ASYNC_EVENT_OUT_EVENT_DATA, + GET_ASYNC_EVENT_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_wait_notify, in/out type should be continuous */ +enum uburma_cmd_wait_notify_type { + /* In type */ + WAIT_NOTIFY_IN_CNT, + 
WAIT_NOTIFY_IN_TIMEOUT, + WAIT_NOTIFY_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + WAIT_NOTIFY_OUT_CNT = UBURMA_CMD_OUT_TYPE_INIT, + WAIT_NOTIFY_OUT_NOTIFY, + WAIT_NOTIFY_OUT_NUM, /* Only for calculating number of types */ +}; + +/* See struct uburma_cmd_exchange_tp_info, in/out type should be continuous */ +enum uburma_cmd_exchange_tp_info_type { + /* In type */ + EXCHANGE_TP_INFO_IN_FLAG, + EXCHANGE_TP_INFO_IN_TRANS_MODE, + EXCHANGE_TP_INFO_IN_LOCAL_EID, + EXCHANGE_TP_INFO_IN_PEER_EID, + EXCHANGE_TP_INFO_IN_TP_HANDLE, + EXCHANGE_TP_INFO_IN_TX_PSN, + EXCHANGE_TP_INFO_IN_NUM, /* Only for calculating number of types */ + /* Out type */ + EXCHANGE_TP_INFO_OUT_PEER_TP_HANDLE = UBURMA_CMD_OUT_TYPE_INIT, + EXCHANGE_TP_INFO_OUT_RX_PSN, + EXCHANGE_TP_INFO_OUT_NUM, /* Only for calculating number of types */ +}; + +int uburma_tlv_parse(struct uburma_cmd_hdr *hdr, void *arg); +int uburma_tlv_append(struct uburma_cmd_hdr *hdr, void *arg); + +int uburma_event_tlv_parse(struct uburma_cmd_hdr *hdr, void *arg); +int uburma_event_tlv_append(struct uburma_cmd_hdr *hdr, void *arg); + +#endif /* UBURMA_CMD_TLV_H */ diff --git a/drivers/ub/urma/uburma/uburma_dev_ops.c b/drivers/ub/urma/uburma/uburma_dev_ops.c new file mode 100644 index 000000000000..0d8884dbe475 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_dev_ops.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: uburma device ops file + * Author: Qian Guoxin + * Create: 2021-08-04 + * Note: + * History: 2021-08-04: Create file + */ + +#include +#include +#include + +#include +#include + +#include "uburma_log.h" +#include "uburma_types.h" +#include "uburma_uobj.h" +#include "uburma_cmd.h" +#include "uburma_mmap.h" + +static void uburma_mmu_release(struct mmu_notifier *mn, struct mm_struct *mm) +{ + struct uburma_mn *ub_mn = container_of(mn, struct uburma_mn, mn); + struct uburma_file *file = + container_of(ub_mn, struct uburma_file, ub_mn); + struct uburma_device *ubu_dev = file->ubu_dev; + struct ubcore_device *ubc_dev; + int srcu_idx; + + uburma_log_debug("Start mmu release uobjs and ucontext\n"); + if (ub_mn->mm != mm || ub_mn->mm == NULL) { + uburma_log_debug("mm already released.\n"); + return; + } + ub_mn->mm = NULL; + + if (ubu_dev == NULL) { + uburma_log_err("ubu dev is null.\n"); + return; + } + + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); + ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); + + down_write(&file->ucontext_rwsem); + + uburma_cleanup_uobjs(file, UBURMA_REMOVE_CLOSE); + if (file->ucontext) { + uburma_log_info("Start ubcore free ucontext.\n"); + if (ubc_dev) { + ubcore_free_ucontext(ubc_dev, file->ucontext); + file->ucontext = NULL; + } + } + up_write(&file->ucontext_rwsem); + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + uburma_log_debug("Release uobjs and ucontext\n"); +} + +static const struct mmu_notifier_ops uburma_mm_notifier_ops = { + .release = uburma_mmu_release, +}; + +void uburma_unregister_mmu(struct uburma_file *file) +{ + struct uburma_mn *ub_mn = &file->ub_mn; + struct mm_struct *mm = ub_mn->mm; + + if (!mm) + return; + + file->ub_mn.mm = NULL; + mmu_notifier_unregister(&file->ub_mn.mn, mm); +} + +int uburma_register_mmu(struct uburma_file *file) +{ + struct uburma_mn *ub_mn = &file->ub_mn; + int ret = 0; + + ub_mn->mm = current->mm; + ub_mn->mn.ops = &uburma_mm_notifier_ops; + ret = mmu_notifier_register(&ub_mn->mn, current->mm); + if (ret) { + ub_mn->mm = NULL; + return ret; + } + + return 0; +} + +int uburma_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct uburma_file *file = filp->private_data; + struct uburma_device *ubu_dev; + struct ubcore_device *ubc_dev; + struct uburma_umap_priv *priv; + int srcu_idx; + int ret; + + if (file == NULL || file->ucontext == NULL || file->ubu_dev == NULL) { + uburma_log_err("can not find ucontext.\n"); + return -EINVAL; + } + + ubu_dev = file->ubu_dev; + uburma_cmd_inc(ubu_dev); + + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); + ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); + if (ubc_dev == NULL || ubc_dev->ops == NULL || + ubc_dev->ops->mmap == NULL) { + uburma_log_err("can not find ubcore device.\n"); + ret = -ENODEV; + goto out; + } + + vma->vm_ops = uburma_get_umap_ops(); + ret = ubc_dev->ops->mmap(file->ucontext, vma); + if (!down_read_trylock(&file->cleanup_rwsem)) + goto out; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + goto unlock_read; + uburma_umap_priv_init(priv, vma); + +unlock_read: + up_read(&file->cleanup_rwsem); +out: + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + uburma_cmd_dec(ubu_dev); + return ret; +} + +void uburma_release_file(struct kref *ref) +{ + struct uburma_file *file = container_of(ref, struct uburma_file, ref); + struct ubcore_device *ubc_dev; + int srcu_idx; + + srcu_idx = srcu_read_lock(&file->ubu_dev->ubc_dev_srcu); + ubc_dev = srcu_dereference(file->ubu_dev->ubc_dev, 
+		&file->ubu_dev->ubc_dev_srcu);
+	if (ubc_dev && !ubc_dev->ops->disassociate_ucontext &&
+	    ubc_dev->ops->owner != NULL)
+		module_put(ubc_dev->ops->owner);
+
+	srcu_read_unlock(&file->ubu_dev->ubc_dev_srcu, srcu_idx);
+
+	uburma_unregister_mmu(file);
+	if (atomic_dec_and_test(&file->ubu_dev->refcnt))
+		complete(&file->ubu_dev->comp);
+
+	kobject_put(&file->ubu_dev->kobj);
+	if (file->fault_page)
+		__free_pages(file->fault_page, 0);
+	mutex_destroy(&file->umap_mutex);
+	kfree(file);
+}
+
+int uburma_open(struct inode *inode, struct file *filp)
+{
+	struct uburma_device *ubu_dev;
+	struct ubcore_device *ubc_dev;
+	struct uburma_file *file;
+	int srcu_idx;
+	int ret;
+
+	ubu_dev = container_of(inode->i_cdev, struct uburma_device, cdev);
+	if (!atomic_inc_not_zero(&ubu_dev->refcnt)) {
+		uburma_log_err("device was not ready.\n");
+		return -ENXIO;
+	}
+
+	srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu);
+	ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu);
+	if (ubc_dev == NULL) {
+		ret = -EIO;
+		uburma_log_err("can not find ubcore device.\n");
+		goto err;
+	}
+
+	if (ubc_dev->ops->disassociate_ucontext == NULL &&
+	    ubc_dev->ops->owner != NULL) {
+		if (!try_module_get(ubc_dev->ops->owner)) {
+			ret = -ENODEV;
+			goto err;
+		}
+	}
+
+	file = kzalloc(sizeof(struct uburma_file), GFP_KERNEL);
+	if (!file) {
+		ret = -ENOMEM;
+		uburma_log_err("can not alloc memory.\n");
+		goto err;
+	}
+
+	file->ubu_dev = ubu_dev;
+	file->ucontext = NULL;
+	kref_init(&file->ref);
+	init_rwsem(&file->ucontext_rwsem);
+	uburma_init_uobj_context(file);
+	mutex_init(&file->umap_mutex);
+	INIT_LIST_HEAD(&file->umaps_list);
+	filp->private_data = file;
+	ret = uburma_register_mmu(file);
+	if (ret != 0) {
+		uburma_log_err("fail to register mmu ret:%d\n", ret);
+		kfree(file);
+		goto err;
+	}
+
+	kobject_get(&ubu_dev->kobj); // Increase reference count for file.
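/*
 * Illustrative userspace sketch, not part of the patch: uburma_open() and
 * uburma_release_file() above rely on the "inc-not-zero on open, complete()
 * on the last put" device-lifetime pattern. The C11 analogue below shows only
 * that pattern; struct demo_dev and the demo_* names are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_dev {
	atomic_int refcnt;	/* starts at 1; reaching 0 means "being removed" */
	bool removal_complete;	/* stands in for complete(&ubu_dev->comp) */
};

/* Analogue of atomic_inc_not_zero(): refuse new references once dying. */
static bool demo_dev_get(struct demo_dev *d)
{
	int cur = atomic_load(&d->refcnt);

	while (cur != 0) {
		if (atomic_compare_exchange_weak(&d->refcnt, &cur, cur + 1))
			return true;
	}
	return false;
}

/* Analogue of atomic_dec_and_test() + complete(): last put signals removal. */
static void demo_dev_put(struct demo_dev *d)
{
	if (atomic_fetch_sub(&d->refcnt, 1) == 1)
		d->removal_complete = true;
}

int main(void)
{
	struct demo_dev d = { .refcnt = 1 };

	if (demo_dev_get(&d))		/* open() while the device is alive */
		demo_dev_put(&d);	/* close() */
	demo_dev_put(&d);		/* remove path drops the initial reference */
	printf("removal complete: %d, reopen allowed: %d\n",
	       d.removal_complete, demo_dev_get(&d));
	return 0;
}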
+ srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + + mutex_lock(&ubu_dev->uburma_file_list_mutex); + list_add_tail(&file->list, &ubu_dev->uburma_file_list); + mutex_unlock(&ubu_dev->uburma_file_list_mutex); + + return nonseekable_open(inode, filp); + +err: + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + if (atomic_dec_and_test(&ubu_dev->refcnt)) + complete(&ubu_dev->comp); + return ret; +} + +int uburma_close(struct inode *inode, struct file *filp) +{ + struct uburma_file *file = filp->private_data; + struct uburma_device *ubu_dev = file->ubu_dev; + struct ubcore_device *ubc_dev; + int srcu_idx; + + if (ubu_dev == NULL) { + uburma_log_err("ubu dev is null.\n"); + return -EINVAL; + } + + srcu_idx = srcu_read_lock(&ubu_dev->ubc_dev_srcu); + ubc_dev = srcu_dereference(ubu_dev->ubc_dev, &ubu_dev->ubc_dev_srcu); + if (!ubc_dev) { + uburma_log_info("ubcore device release in another proccess.\n"); + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + return 0; + } + + mutex_lock(&ubu_dev->uburma_file_list_mutex); + if (!list_empty_careful(&file->list)) + list_del_init(&file->list); + mutex_unlock(&ubu_dev->uburma_file_list_mutex); + + down_write(&file->ucontext_rwsem); + uburma_cleanup_uobjs(file, UBURMA_REMOVE_CLOSE); + if (file->ucontext) { + uburma_log_info("Start ubcore free ucontext.\n"); + ubcore_free_ucontext(ubc_dev, file->ucontext); + file->ucontext = NULL; + } + up_write(&file->ucontext_rwsem); + + uburma_log_debug("device: %s close.\n", ubc_dev->dev_name); + srcu_read_unlock(&ubu_dev->ubc_dev_srcu, srcu_idx); + + kref_put(&file->ref, uburma_release_file); + + return 0; +} diff --git a/drivers/ub/urma/uburma/uburma_event.c b/drivers/ub/urma/uburma/uburma_event.c new file mode 100644 index 000000000000..981d80039b75 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_event.c @@ -0,0 +1,818 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: uburma event implementation + * Author: Yan Fangfang + * Create: 2022-07-28 + * Note: + * History: 2022-07-28: create file + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "uburma_log.h" +#include "uburma_types.h" +#include "uburma_cmd.h" +#include "uburma_uobj.h" +#include "uburma_cmd_tlv.h" +#include "uburma_file_ops.h" + +#include "uburma_event.h" + +#define UBURMA_JFCE_DELETE_EVENT 0 + +struct uburma_jfe_event { + struct list_head node; + uint32_t event_type; /* support async event */ + uint64_t event_data; + struct list_head obj_node; + uint32_t *counter; + uburma_jfe_event_data_free_fn event_data_free_fn; +}; + +struct uburma_jfce_uobj *uburma_get_jfce_uobj(int fd, struct uburma_file *ufile) +{ + struct uburma_uobj *uobj; + struct uburma_jfce_uobj *jfce; + + if (fd < 0) + return ERR_PTR(-ENOENT); + + uobj = uobj_get_read(UOBJ_CLASS_JFCE, fd, ufile); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("get jfce uobj fail with fd %d\n", fd); + return (void *)uobj; + } + + jfce = container_of(uobj, struct uburma_jfce_uobj, uobj); + uobj_get(uobj); // To keep the event file until jfce destroy. + uobj_put_read(uobj); + return jfce; +} + +void uburma_write_event_with_free_fn( + struct uburma_jfe *jfe, uint64_t event_data, uint32_t event_type, + struct list_head *obj_event_list, uint32_t *counter, + uburma_jfe_event_data_free_fn event_data_free_fn) +{ + struct uburma_jfe_event *event; + unsigned long flags; + + spin_lock_irqsave(&jfe->lock, flags); + if (jfe->deleting) { + spin_unlock_irqrestore(&jfe->lock, flags); + return; + } + event = kmalloc(sizeof(struct uburma_jfe_event), GFP_ATOMIC); + if (event == NULL) { + spin_unlock_irqrestore(&jfe->lock, flags); + return; + } + event->event_data = event_data; + event->event_type = event_type; + event->counter = counter; + event->event_data_free_fn = event_data_free_fn; + + list_add_tail(&event->node, &jfe->event_list); + if (obj_event_list) + list_add_tail(&event->obj_node, obj_event_list); + if (jfe->async_queue) + kill_fasync(&jfe->async_queue, SIGIO, POLL_IN); + spin_unlock_irqrestore(&jfe->lock, flags); + wake_up_interruptible(&jfe->poll_wait); +} + +void uburma_write_event(struct uburma_jfe *jfe, uint64_t event_data, + uint32_t event_type, struct list_head *obj_event_list, + uint32_t *counter) +{ + uburma_write_event_with_free_fn(jfe, event_data, event_type, + obj_event_list, counter, NULL); +} + +void uburma_jfce_handler(struct ubcore_jfc *jfc) +{ + struct uburma_jfc_uobj *jfc_uobj; + struct uburma_jfce_uobj *jfce; + bool write_event = false; + + if (jfc == NULL) + return; + + rcu_read_lock(); + jfc_uobj = rcu_dereference(jfc->jfc_cfg.jfc_context); + if (jfc_uobj != NULL && !IS_ERR(jfc_uobj) && !IS_ERR(jfc_uobj->jfce)) { + jfce = container_of(jfc_uobj->jfce, struct uburma_jfce_uobj, + uobj); + uburma_write_event(&jfce->jfe, jfc->urma_jfc, 0, + &jfc_uobj->comp_event_list, + &jfc_uobj->comp_events_reported); + write_event = true; + } + + rcu_read_unlock(); + if (write_event) + uburma_log_info("Finish to write jfc event, jfc_id: %u.\n", jfc->id); +} + +void uburma_uninit_jfe(struct uburma_jfe *jfe) +{ + struct list_head *p, *next; + struct uburma_jfe_event *event; + + spin_lock_irq(&jfe->lock); + list_for_each_safe(p, next, &jfe->event_list) { + event = list_entry(p, struct uburma_jfe_event, node); + if (event->counter) + list_del(&event->obj_node); + if (event->event_data_free_fn != NULL) + (*(event->event_data_free_fn))(event->event_data); + list_del(&event->node); + 
kfree(event); + } + spin_unlock_irq(&jfe->lock); +} + +static int uburma_delete_jfce(struct inode *inode, struct file *filp) +{ + struct uburma_uobj *uobj = filp->private_data; + struct uburma_file *ufile; + + if (uobj == NULL || uobj->ufile == NULL) + return 0; + + ufile = uobj->ufile; + down_write(&ufile->ucontext_rwsem); + + uobj_get(uobj); + /* will call uburma_hot_unplug_jfce if clean up is not going on */ + uburma_close_uobj_fd(filp); + uobj->ufile = NULL; + uobj_put(uobj); + up_write(&ufile->ucontext_rwsem); + kref_put(&ufile->ref, uburma_release_file); + + return 0; +} + +/* Read up to event_cnt events from jfe */ +static uint32_t uburma_read_jfe_event(struct uburma_jfe *jfe, + uint32_t event_cnt, + struct list_head *event_list) +{ + struct list_head *p, *next; + struct uburma_jfe_event *event; + uint32_t cnt = 0; + + spin_lock_irq(&jfe->lock); + + list_for_each_safe(p, next, &jfe->event_list) { + if (cnt == event_cnt) + break; + event = list_entry(p, struct uburma_jfe_event, node); + if (event->counter) { + ++(*event->counter); + list_del(&event->obj_node); + } + list_del(p); + list_add_tail(p, event_list); + cnt++; + } + spin_unlock_irq(&jfe->lock); + return cnt; +} + +static int uburma_wait_event_timeout(struct uburma_jfe *jfe, + unsigned long max_timeout, + uint32_t max_event_cnt, + uint32_t *event_cnt, + struct list_head *event_list) +{ + long timeout = (long)max_timeout; + + *event_cnt = 0; + while (!jfe->deleting) { + asm volatile("" : : : "memory"); + *event_cnt = + uburma_read_jfe_event(jfe, max_event_cnt, event_list); + /* Stop waiting once we have read at least one event */ + if (jfe->deleting) + return -EIO; + else if (*event_cnt > 0) + break; + /* + * 0 if the @condition evaluated to %false after the @timeout elapsed, + * 1 if the @condition evaluated to %true after the @timeout elapsed, + * the remaining jiffies (at least 1) if the @condition evaluated to true + * before the @timeout elapsed, + * or -%ERESTARTSYS if it was interrupted by a signal. + */ + timeout = wait_event_interruptible_timeout( + jfe->poll_wait, + (!list_empty(&jfe->event_list) || jfe->deleting), + (timeout)); + if (timeout <= 0) + return timeout; + } + + return 0; +} + +static int uburma_wait_event(struct uburma_jfe *jfe, bool nonblock, + uint32_t max_event_cnt, uint32_t *event_cnt, + struct list_head *event_list) +{ + int ret; + + *event_cnt = 0; + spin_lock_irq(&jfe->lock); + while (list_empty(&jfe->event_list)) { + spin_unlock_irq(&jfe->lock); + if (nonblock) + return -EAGAIN; + + /* The function will return -ERESTARTSYS if it was interrupted by a + * signal and 0 if @condition evaluated to true. 
+ */ + ret = wait_event_interruptible(jfe->poll_wait, + (!list_empty(&jfe->event_list) || + jfe->deleting)); + if (ret != 0) + return ret; + + spin_lock_irq(&jfe->lock); + if (list_empty(&jfe->event_list) && jfe->deleting) { + spin_unlock_irq(&jfe->lock); + return -EIO; + } + } + spin_unlock_irq(&jfe->lock); + *event_cnt = uburma_read_jfe_event(jfe, max_event_cnt, event_list); + + return 0; +} + +static __poll_t uburma_jfe_poll(struct uburma_jfe *jfe, struct file *filp, + struct poll_table_struct *wait) +{ + __poll_t flag = 0; + + poll_wait(filp, &jfe->poll_wait, wait); + + spin_lock_irq(&jfe->lock); + if (!list_empty(&jfe->event_list)) + flag = EPOLLIN | EPOLLRDNORM; + + spin_unlock_irq(&jfe->lock); + + return flag; +} + +static __poll_t uburma_jfce_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct uburma_uobj *uobj = filp->private_data; + struct uburma_jfce_uobj *jfce = + container_of(uobj, struct uburma_jfce_uobj, uobj); + + return uburma_jfe_poll(&jfce->jfe, filp, wait); +} + +static int uburma_jfce_wait(struct uburma_jfce_uobj *jfce, struct file *filp, + struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_jfce_wait arg = { 0 }; + struct list_head event_list; + struct uburma_jfe_event *event; + uint32_t max_event_cnt; + uint32_t i = 0; + struct list_head *p, *next; + int ret; + + ret = uburma_event_tlv_parse(hdr, &arg); + if (ret != 0) + return -EFAULT; + + /* urma lib ensures that max_event_cnt > 0 */ + max_event_cnt = (arg.in.max_event_cnt < MAX_JFCE_EVENT_CNT ? + arg.in.max_event_cnt : + MAX_JFCE_EVENT_CNT); + INIT_LIST_HEAD(&event_list); + if (arg.in.time_out <= 0) { + ret = uburma_wait_event( + &jfce->jfe, + (filp->f_flags & O_NONBLOCK) | (arg.in.time_out == 0), + max_event_cnt, &arg.out.event_cnt, &event_list); + } else { + ret = uburma_wait_event_timeout( + &jfce->jfe, msecs_to_jiffies(arg.in.time_out), + max_event_cnt, &arg.out.event_cnt, &event_list); + } + + if (ret < 0) { + uburma_log_err("Failed to wait jfce event"); + return ret; + } + + list_for_each_safe(p, next, &event_list) { + event = list_entry(p, struct uburma_jfe_event, node); + arg.out.event_data[i++] = event->event_data; + list_del(p); + kfree(event); + } + + if (arg.out.event_cnt > 0 && uburma_event_tlv_append(hdr, &arg) != 0) + return -EFAULT; + + return 0; +} + +static long uburma_jfce_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct uburma_cmd_hdr hdr; + struct uburma_uobj *uobj = filp->private_data; + struct uburma_jfce_uobj *jfce = + container_of(uobj, struct uburma_jfce_uobj, uobj); + int ret; + + if (cmd == UBURMA_CMD_WAIT_JFC) { + ret = (int)copy_from_user(&hdr, (struct uburma_cmd_hdr *)arg, + sizeof(struct uburma_cmd_hdr)); + if ((ret != 0) || (hdr.args_len > UBURMA_CMD_MAX_ARGS_SIZE) || + (hdr.args_len == 0 || hdr.args_addr == 0 || + hdr.command >= UBURMA_EVENT_CMD_MAX)) { + ret = -EINVAL; + } else { + ret = uburma_jfce_wait(jfce, filp, &hdr); + } + } else { + ret = -ENOIOCTLCMD; + } + return (long)ret; +} + +static int uburma_jfce_fasync(int fd, struct file *filp, int on) +{ + int ret; + struct uburma_uobj *uobj = filp->private_data; + struct uburma_jfce_uobj *jfce = + container_of(uobj, struct uburma_jfce_uobj, uobj); + + if (uobj == NULL) + return -EINVAL; + spin_lock_irq(&jfce->jfe.lock); + ret = fasync_helper(fd, filp, on, &jfce->jfe.async_queue); + spin_unlock_irq(&jfce->jfe.lock); + return ret; +} + +const struct file_operations uburma_jfce_fops = { + .owner = THIS_MODULE, + .poll = uburma_jfce_poll, + .release = uburma_delete_jfce, + .unlocked_ioctl 
= uburma_jfce_ioctl, + .fasync = uburma_jfce_fasync, +}; + +void uburma_init_jfe(struct uburma_jfe *jfe) +{ + spin_lock_init(&jfe->lock); + INIT_LIST_HEAD(&jfe->event_list); + init_waitqueue_head(&jfe->poll_wait); + jfe->async_queue = NULL; +} + +static int uburma_delete_jfae(struct inode *inode, struct file *filp) +{ + struct uburma_uobj *uobj = filp->private_data; + struct uburma_jfae_uobj *jfae = + container_of(uobj, struct uburma_jfae_uobj, uobj); + struct uburma_file *ufile; + + if (uobj == NULL || jfae == NULL || uobj->ufile == NULL) + return 0; + + ufile = uobj->ufile; + down_write(&ufile->ucontext_rwsem); + + uobj_get(uobj); + /* call uburma_hot_unplug_jfae when cleanup is not going on */ + uburma_close_uobj_fd(filp); + uburma_uninit_jfe(&jfae->jfe); + uobj->ufile = NULL; + uobj_put(uobj); + up_write(&ufile->ucontext_rwsem); + kref_put(&ufile->ref, uburma_release_file); + + return 0; +} + +static __poll_t uburma_jfae_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct uburma_uobj *uobj = filp->private_data; + struct uburma_jfae_uobj *jfae = + container_of(uobj, struct uburma_jfae_uobj, uobj); + + return uburma_jfe_poll(&jfae->jfe, filp, wait); +} + +static inline void +uburma_set_async_event(struct uburma_cmd_async_event *async_event, + const struct uburma_jfe_event *event) +{ + async_event->event_data = event->event_data; + async_event->event_type = event->event_type; +} + +static int uburma_get_async_event(struct uburma_jfae_uobj *jfae, + struct file *filp, struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_async_event arg = { 0 }; + struct list_head event_list; + struct uburma_jfe_event *event = NULL; + uint32_t event_cnt; + int ret; + + INIT_LIST_HEAD(&event_list); + ret = uburma_wait_event(&jfae->jfe, filp->f_flags & O_NONBLOCK, 1, + &event_cnt, &event_list); + if (ret < 0) + return ret; + + event = list_first_entry(&event_list, struct uburma_jfe_event, node); + if (event == NULL) + return -EIO; + + uburma_set_async_event(&arg, event); + list_del(&event->node); + kfree(event); + + if (event_cnt > 0 && uburma_event_tlv_append(hdr, &arg) != 0) + return -EFAULT; + + return 0; +} + +static long uburma_jfae_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct uburma_cmd_hdr hdr; + struct uburma_uobj *uobj = filp->private_data; + struct uburma_jfae_uobj *jfae = + container_of(uobj, struct uburma_jfae_uobj, uobj); + int ret; + + if (cmd == UBURMA_CMD_GET_ASYNC_EVENT) { + ret = (int)copy_from_user(&hdr, (struct uburma_cmd_hdr *)arg, + sizeof(struct uburma_cmd_hdr)); + if ((ret != 0) || (hdr.args_len > UBURMA_CMD_MAX_ARGS_SIZE) || + (hdr.args_len == 0 || hdr.args_addr == 0 || + hdr.command >= UBURMA_EVENT_CMD_MAX)) { + ret = -EINVAL; + } else { + ret = uburma_get_async_event(jfae, filp, &hdr); + } + } else { + ret = -ENOIOCTLCMD; + } + + return (long)ret; +} + +static int uburma_jfae_fasync(int fd, struct file *filp, int on) +{ + int ret; + struct uburma_uobj *uobj = filp->private_data; + struct uburma_jfae_uobj *jfae = + container_of(uobj, struct uburma_jfae_uobj, uobj); + + if (uobj == NULL) + return -EINVAL; + spin_lock_irq(&jfae->jfe.lock); + ret = fasync_helper(fd, filp, on, &jfae->jfe.async_queue); + spin_unlock_irq(&jfae->jfe.lock); + return ret; +} + +const struct file_operations uburma_jfae_fops = { + .owner = THIS_MODULE, + .poll = uburma_jfae_poll, + .release = uburma_delete_jfae, + .unlocked_ioctl = uburma_jfae_ioctl, + .fasync = uburma_jfae_fasync, +}; + +static void uburma_async_event_callback(struct ubcore_event *event, + struct 
ubcore_event_handler *handler) +{ + struct uburma_jfae_uobj *jfae = + container_of(handler, struct uburma_jfae_uobj, event_handler); + + if (WARN_ON(IS_ERR_OR_NULL(jfae))) + return; + + uburma_write_event(&jfae->jfe, event->element.port_id, + event->event_type, NULL, NULL); +} + +static inline void +uburma_init_jfae_handler(struct ubcore_event_handler *handler) +{ + INIT_LIST_HEAD(&handler->node); + handler->event_callback = uburma_async_event_callback; +} + +void uburma_init_jfae(struct uburma_jfae_uobj *jfae, + struct ubcore_device *ubc_dev) +{ + uburma_init_jfe(&jfae->jfe); + uburma_init_jfae_handler(&jfae->event_handler); + ubcore_register_event_handler(ubc_dev, &jfae->event_handler); + jfae->dev = ubc_dev; +} + +void uburma_release_comp_event(struct uburma_jfce_uobj *jfce, + struct list_head *event_list) +{ + struct uburma_jfe *jfe = &jfce->jfe; + struct uburma_jfe_event *event, *tmp; + + spin_lock_irq(&jfe->lock); + list_for_each_entry_safe(event, tmp, event_list, obj_node) { + list_del(&event->node); + kfree(event); + } + spin_unlock_irq(&jfe->lock); +} + +void uburma_release_async_event(struct uburma_file *ufile, + struct list_head *event_list) +{ + struct uburma_jfae_uobj *jfae = ufile->ucontext->jfae; + struct uburma_jfe *jfe = &jfae->jfe; + struct uburma_jfe_event *event, *tmp; + + spin_lock_irq(&jfe->lock); + list_for_each_entry_safe(event, tmp, event_list, obj_node) { + list_del(&event->node); + kfree(event); + } + spin_unlock_irq(&jfe->lock); + uburma_put_jfae(ufile); +} + +int uburma_get_jfae(struct uburma_file *ufile) +{ + struct uburma_jfae_uobj *jfae; + + if (ufile->ucontext == NULL) { + uburma_log_err("ucontext is NULL"); + return -ENODEV; + } + + jfae = ufile->ucontext->jfae; + if (IS_ERR_OR_NULL(jfae)) { + uburma_log_err("Failed to get jfae"); + return -EINVAL; + } + + uobj_get(&jfae->uobj); + return 0; +} + +void uburma_put_jfae(struct uburma_file *ufile) +{ + struct uburma_jfae_uobj *jfae; + + if (ufile->ucontext == NULL) + return; + + jfae = ufile->ucontext->jfae; + if (IS_ERR_OR_NULL(jfae)) + return; + + uobj_put(&jfae->uobj); +} + +static void uburma_flush_notifier(struct uburma_uobj *uobj, + struct uburma_file *file) +{ + struct uburma_notifier_uobj *notifier = + container_of(uobj, struct uburma_notifier_uobj, uobj); + int incomplete_cnt; + uint32_t event_cnt; + struct uburma_jfe_event *event = NULL; + struct list_head event_list; + struct list_head *p, *next; + struct uburma_notify_event *notify; + int ret; + + INIT_LIST_HEAD(&event_list); + while ((incomplete_cnt = atomic_read(¬ifier->incomplete_cnt)) > 0) { + ret = uburma_wait_event(¬ifier->jfe, false, + (uint32_t)incomplete_cnt, &event_cnt, + &event_list); + if (notifier->jfe.deleting) + break; + if (ret < 0) + continue; + atomic_sub(event_cnt, ¬ifier->incomplete_cnt); + list_for_each_safe(p, next, &event_list) { + event = list_entry(p, struct uburma_jfe_event, node); + notify = (struct uburma_notify_event *)(uintptr_t) + event->event_data; + if (notify->notify.status == 0) { + if (notify->notify.type == + UBURMA_IMPORT_JETTY_NOTIFY) + uburma_unimport_jetty( + file, false, + notify->tjetty_handle); + else + uburma_unbind_jetty( + file, false, + notify->jetty_handle, + notify->tjetty_handle); + } + kfree((struct uburma_notify_event *)(uintptr_t) + event->event_data); + list_del(p); + kfree(event); + } + } +} + +static int uburma_delete_notifier(struct inode *inode, struct file *filp) +{ + struct uburma_uobj *uobj = filp->private_data; + struct uburma_file *ufile; + + if (uobj == NULL || uobj->ufile == NULL) + 
return 0; + + uobj_get(uobj); + ufile = uobj->ufile; + uburma_flush_notifier(uobj, ufile); + + down_write(&ufile->ucontext_rwsem); + /* call uburma_hot_unplug_notifier when cleanup is not going on */ + uburma_close_uobj_fd(filp); + uobj->ufile = NULL; + uobj_put(uobj); + up_write(&ufile->ucontext_rwsem); + kref_put(&ufile->ref, uburma_release_file); + + return 0; +} + +static __poll_t uburma_notifier_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct uburma_uobj *uobj = filp->private_data; + struct uburma_notifier_uobj *notifier = + container_of(uobj, struct uburma_notifier_uobj, uobj); + + return uburma_jfe_poll(¬ifier->jfe, filp, wait); +} + +static int uburma_wait_notify(struct uburma_notifier_uobj *notifier, + struct file *filp, struct uburma_cmd_hdr *hdr) +{ + struct uburma_cmd_wait_notify arg = { 0 }; + struct list_head event_list; + struct uburma_jfe_event *event = NULL; + uint32_t event_cnt, max_event_cnt; + uint32_t i = 0; + struct list_head *p, *next; + int ret; + + ret = uburma_event_tlv_parse(hdr, &arg); + if (ret != 0) + return -EFAULT; + + max_event_cnt = + (arg.in.cnt < MAX_NOTIFY_CNT ? arg.in.cnt : MAX_NOTIFY_CNT); + INIT_LIST_HEAD(&event_list); + if (arg.in.timeout <= 0) { + ret = uburma_wait_event(¬ifier->jfe, + (filp->f_flags & O_NONBLOCK) | + (arg.in.timeout == 0), + max_event_cnt, &event_cnt, &event_list); + } else { + ret = uburma_wait_event_timeout( + ¬ifier->jfe, msecs_to_jiffies(arg.in.timeout), + max_event_cnt, &event_cnt, &event_list); + } + + if (ret < 0 && ret != -EAGAIN) { + uburma_log_err("Failed to wait notify event"); + return ret; + } + + arg.out.cnt = event_cnt; + atomic_sub(event_cnt, ¬ifier->incomplete_cnt); + list_for_each_safe(p, next, &event_list) { + event = list_entry(p, struct uburma_jfe_event, node); + arg.out.notify[i++] = ((struct uburma_notify_event *)(uintptr_t) + event->event_data) + ->notify; + kfree((struct uburma_notify_event *)(uintptr_t) + event->event_data); + list_del(p); + kfree(event); + } + + if (event_cnt > 0 && uburma_event_tlv_append(hdr, &arg) != 0) + return -EFAULT; + + return 0; +} + +static long uburma_notifier_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct uburma_cmd_hdr hdr; + struct uburma_uobj *uobj = filp->private_data; + struct uburma_notifier_uobj *notifier = + container_of(uobj, struct uburma_notifier_uobj, uobj); + int ret; + + if (cmd == UBURMA_CMD_WAIT_NOTIFY) { + ret = (int)copy_from_user(&hdr, (struct uburma_cmd_hdr *)arg, + sizeof(struct uburma_cmd_hdr)); + if ((ret != 0) || (hdr.args_len > UBURMA_CMD_MAX_ARGS_SIZE) || + (hdr.args_len == 0 || hdr.args_addr == 0 || + hdr.command >= UBURMA_EVENT_CMD_MAX)) { + ret = -EINVAL; + } else { + ret = uburma_wait_notify(notifier, filp, &hdr); + } + } else { + ret = -ENOIOCTLCMD; + } + + return (long)ret; +} + +static int uburma_notifier_fasync(int fd, struct file *filp, int on) +{ + int ret; + struct uburma_uobj *uobj = filp->private_data; + struct uburma_notifier_uobj *notifier = + container_of(uobj, struct uburma_notifier_uobj, uobj); + + if (uobj == NULL) + return -EINVAL; + spin_lock_irq(¬ifier->jfe.lock); + ret = fasync_helper(fd, filp, on, ¬ifier->jfe.async_queue); + spin_unlock_irq(¬ifier->jfe.lock); + return ret; +} + +const struct file_operations uburma_notifier_fops = { + .owner = THIS_MODULE, + .poll = uburma_notifier_poll, + .release = uburma_delete_notifier, + .unlocked_ioctl = uburma_notifier_ioctl, + .fasync = uburma_notifier_fasync, +}; + +struct uburma_notifier_uobj *uburma_get_notifier_uobj(int fd, + struct 
uburma_file *ufile) +{ + struct uburma_uobj *uobj; + struct uburma_notifier_uobj *notifier; + + if (fd < 0) + return ERR_PTR(-ENOENT); + + uobj = uobj_get_read(UOBJ_CLASS_NOTIFIER, fd, ufile); + if (IS_ERR_OR_NULL(uobj)) { + uburma_log_err("get notifier uobj fail with fd %d\n", fd); + return (void *)uobj; + } + + notifier = container_of(uobj, struct uburma_notifier_uobj, uobj); + uobj_get(uobj); // To keep the event file until notifier destroy. + uobj_put_read(uobj); + return notifier; +} diff --git a/drivers/ub/urma/uburma/uburma_event.h b/drivers/ub/urma/uburma/uburma_event.h new file mode 100644 index 000000000000..a2c8ef9eee72 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_event.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uburma event header + * Author: Yan Fangfang + * Create: 2022-07-28 + * Note: + * History: 2022-07-28: create file + */ + +#ifndef UBURMA_EVENT_H +#define UBURMA_EVENT_H + +#include +#include "uburma_uobj.h" + +void uburma_init_jfe(struct uburma_jfe *jfe); +void uburma_uninit_jfe(struct uburma_jfe *jfe); + +typedef void (*uburma_jfe_event_data_free_fn)(uint64_t event_data); + +void uburma_write_event_with_free_fn( + struct uburma_jfe *jfe, uint64_t event_data, uint32_t event_type, + struct list_head *obj_event_list, uint32_t *counter, + uburma_jfe_event_data_free_fn event_data_free_fn); +void uburma_write_event(struct uburma_jfe *jfe, uint64_t event_data, + uint32_t event_type, struct list_head *obj_event_list, + uint32_t *counter); + +struct uburma_jfce_uobj *uburma_get_jfce_uobj(int fd, + struct uburma_file *ufile); +void uburma_jfce_handler(struct ubcore_jfc *jfc); +void uburma_release_comp_event(struct uburma_jfce_uobj *jfce, + struct list_head *event_list); + +void uburma_init_jfae(struct uburma_jfae_uobj *jfae, + struct ubcore_device *ubc_dev); +void uburma_release_async_event(struct uburma_file *ufile, + struct list_head *event_list); +int uburma_get_jfae(struct uburma_file *ufile); +void uburma_put_jfae(struct uburma_file *ufile); + +struct uburma_notifier_uobj * +uburma_get_notifier_uobj(int fd, struct uburma_file *ufile); +#endif /* UBURMA_EVENT_H */ diff --git a/drivers/ub/urma/uburma/uburma_file_ops.h b/drivers/ub/urma/uburma/uburma_file_ops.h new file mode 100644 index 000000000000..ee704c5ce404 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_file_ops.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: uburma device file ops file + * Author: Qian Guoxin + * Create: 2021-8-4 + * Note: + * History: 2021-8-4: Create file + */ + +#ifndef UBURMA_FILE_OPS_H +#define UBURMA_FILE_OPS_H + +#include +#include +#include + +void uburma_release_file(struct kref *ref); +int uburma_mmap(struct file *filp, struct vm_area_struct *vma); +int uburma_open(struct inode *inode, struct file *filp); +int uburma_close(struct inode *inode, struct file *filp); +long uburma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); + +#endif /* UBURMA_FILE_OPS_H */ diff --git a/drivers/ub/urma/uburma/uburma_log.c b/drivers/ub/urma/uburma/uburma_log.c new file mode 100644 index 000000000000..34f09e6f0207 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_log.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uburma log file + * Author: Qian Guoxin + * Create: 2024-2-5 + * Note: + * History: 2024-2-5: Create file + */ + +#include +#include "uburma_log.h" + +uint32_t g_uburma_log_level = UBURMA_LOG_LEVEL_WARNING; diff --git a/drivers/ub/urma/uburma/uburma_log.h b/drivers/ub/urma/uburma/uburma_log.h new file mode 100644 index 000000000000..95356579dcf7 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_log.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uburma log head file + * Author: Qian Guoxin + * Create: 2021-8-3 + * Note: + * History: 2021-8-3: Create file + */ + +#ifndef UBURMA_LOG_H +#define UBURMA_LOG_H + +#include +#include + +enum uburma_log_level { + UBURMA_LOG_LEVEL_EMERG = 0, + UBURMA_LOG_LEVEL_ALERT = 1, + UBURMA_LOG_LEVEL_CRIT = 2, + UBURMA_LOG_LEVEL_ERR = 3, + UBURMA_LOG_LEVEL_WARNING = 4, + UBURMA_LOG_LEVEL_NOTICE = 5, + UBURMA_LOG_LEVEL_INFO = 6, + UBURMA_LOG_LEVEL_DEBUG = 7, + UBURMA_LOG_LEVEL_MAX = 8, +}; + +/* add log head info, "LogTag_UBURMA|function|[line]| */ +#define UBURMA_LOG_TAG "LogTag_UBURMA" +#define uburma_log(l, format, args...) \ + pr_##l("%s|%s:[%d]|" format, UBURMA_LOG_TAG, __func__, __LINE__, ##args) + +#define UBURMA_RATELIMIT_INTERVAL (5 * HZ) +#define UBURMA_RATELIMIT_BURST 100 + +extern uint32_t g_uburma_log_level; + +#define uburma_log_info(...) \ + do { \ + if (g_uburma_log_level >= UBURMA_LOG_LEVEL_INFO) \ + uburma_log(info, __VA_ARGS__); \ + } while (0) + +#define uburma_log_err(...) \ + do { \ + if (g_uburma_log_level >= UBURMA_LOG_LEVEL_ERR) \ + uburma_log(err, __VA_ARGS__); \ + } while (0) + +#define uburma_log_warn(...) 
\ + do { \ + if (g_uburma_log_level >= UBURMA_LOG_LEVEL_WARNING) \ + uburma_log(warn, __VA_ARGS__); \ + } while (0) + +/* No need to record debug log by printk_ratelimited */ +#define uburma_log_debug(...) \ + do { \ + if (g_uburma_log_level >= UBURMA_LOG_LEVEL_DEBUG) \ + uburma_log(debug, __VA_ARGS__); \ + } while (0) + +/* Rate Limited log to avoid soft lockup crash by quantities of printk */ +/* Current limit is 100 log every 5 seconds */ +#define uburma_log_info_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBURMA_RATELIMIT_INTERVAL, \ + UBURMA_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_uburma_log_level >= UBURMA_LOG_LEVEL_INFO)) \ + uburma_log(info, __VA_ARGS__); \ + } while (0) + +#define uburma_log_err_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBURMA_RATELIMIT_INTERVAL, \ + UBURMA_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_uburma_log_level >= UBURMA_LOG_LEVEL_ERR)) \ + uburma_log(err, __VA_ARGS__); \ + } while (0) + +#define uburma_log_warn_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBURMA_RATELIMIT_INTERVAL, \ + UBURMA_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_uburma_log_level >= UBURMA_LOG_LEVEL_WARNING)) \ + uburma_log(warn, __VA_ARGS__); \ + } while (0) + +#endif /* UBURMA_LOG_H */ diff --git a/drivers/ub/urma/uburma/uburma_main.c b/drivers/ub/urma/uburma/uburma_main.c new file mode 100644 index 000000000000..4c11c098699d --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_main.c @@ -0,0 +1,420 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: uburma kernel module + * Author: Qian Guoxin + * Create: 2021-08-03 + * Note: + * History: 2021-08-03: Create file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "uburma_log.h" +#include "uburma_types.h" +#include "uburma_file_ops.h" +#include "uburma_uobj.h" +#include "uburma_cmd.h" +#include "uburma_mmap.h" + +#define UBURMA_LOG_FILE_PERMISSION (0644) + +module_param(g_uburma_log_level, uint, UBURMA_LOG_FILE_PERMISSION); +MODULE_PARM_DESC(g_uburma_log_level, " 3: ERR, 4: WARNING, 6: INFO, 7: DEBUG"); + +#define UBURMA_MAX_DEVICE 1024 +#define UBURMA_DYNAMIC_MINOR_NUM UBURMA_MAX_DEVICE +#define UBURMA_MODULE_NAME "uburma" +#define UBURMA_DEVNODE_MODE (0666) + +static DECLARE_BITMAP(g_dev_bitmap, UBURMA_MAX_DEVICE); + +static dev_t g_dynamic_uburma_dev; + +static atomic_t + g_dev_cnt; /* When cnt becomes 0, it will wake up dev_flush_comp */ +static struct completion g_dev_flush_comp; + +static char *uburma_devnode(const struct device *dev, umode_t *mode) + +{ + if (mode) + *mode = UBURMA_DEVNODE_MODE; + + return kasprintf(GFP_KERNEL, "uburma/%s", dev_name(dev)); +} + +static struct class g_uburma_class = { + .name = UBURMA_MODULE_NAME, + .devnode = uburma_devnode, +}; + +static const struct file_operations g_uburma_fops = { + .owner = THIS_MODULE, + // .write = uburma_write, + .mmap = uburma_mmap, + .open = uburma_open, + .release = uburma_close, + .llseek = no_llseek, + .unlocked_ioctl = uburma_ioctl, + .compat_ioctl = uburma_ioctl, +}; + +static int uburma_add_device(struct ubcore_device *ubc_dev); +static void uburma_remove_device(struct ubcore_device *ubc_dev, + void *client_ctx); +static void uburma_stop_device(struct ubcore_device *ubc_dev, void *client_ctx); +static struct ubcore_client g_urma_client = { + .list_node = LIST_HEAD_INIT(g_urma_client.list_node), + .client_name = "urma", + .add = uburma_add_device, + .remove = uburma_remove_device, + .stop = uburma_stop_device, +}; + +static void uburma_release_dev(struct kobject *kobj) +{ + struct uburma_device *ubu_dev = + container_of(kobj, struct uburma_device, kobj); + + cleanup_srcu_struct(&ubu_dev->ubc_dev_srcu); + kfree(ubu_dev); +} + +static const struct kobj_type uburma_dev_ktype = { + .release = uburma_release_dev, +}; + +static int uburma_get_devt(dev_t *devt) +{ + unsigned int devnum; + + devnum = (unsigned int)find_first_zero_bit(g_dev_bitmap, + UBURMA_MAX_DEVICE); + if (devnum >= UBURMA_MAX_DEVICE) { + uburma_log_err("Invalid argument.\n"); + return -ENOMEM; + } + set_bit(devnum, g_dev_bitmap); + *devt = g_dynamic_uburma_dev + devnum; + return 0; +} + +static int uburma_device_create(struct uburma_device *ubu_dev, + struct ubcore_device *ubc_dev) +{ + /* create /sys/class/uburma/dev_name> */ + ubu_dev->dev = device_create(&g_uburma_class, ubc_dev->dev.parent, + ubu_dev->cdev.dev, ubu_dev, "%s", + ubc_dev->dev_name); + if (IS_ERR(ubu_dev->dev)) { + uburma_log_err("device create failed, device:%s.\n", + ubc_dev->dev_name); + return -ENOMEM; + } + + return 0; +} + +static void uburma_device_destroy(struct uburma_device *ubu_dev) +{ + device_destroy(&g_uburma_class, ubu_dev->cdev.dev); + ubu_dev->dev = NULL; +} + +static int uburma_cdev_create(struct uburma_device *ubu_dev, + struct ubcore_device *ubc_dev) +{ + dev_t base; + + if (uburma_get_devt(&base) != 0) { + uburma_log_err("Invalid argument.\n"); + return -ENOMEM; + } + ubu_dev->devnum = base - g_dynamic_uburma_dev; + + cdev_init(&ubu_dev->cdev, NULL); + 
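/*
 * Illustrative userspace sketch, not part of the patch: uburma_get_devt()
 * above hands out char-device minor numbers from a fixed-size bitmap
 * (find_first_zero_bit()/set_bit(), with clear_bit() on the remove path).
 * The plain-C version below shows only that allocation idea; the demo_*
 * names are hypothetical.
 */
#include <limits.h>
#include <stdio.h>

#define DEMO_MAX_DEVICE 1024
#define DEMO_BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)
#define DEMO_WORDS ((DEMO_MAX_DEVICE + DEMO_BITS_PER_WORD - 1) / DEMO_BITS_PER_WORD)

static unsigned long demo_bitmap[DEMO_WORDS];

/* Return the lowest free minor and mark it used, or -1 when exhausted. */
static int demo_get_minor(void)
{
	for (unsigned int i = 0; i < DEMO_MAX_DEVICE; i++) {
		unsigned long *word = &demo_bitmap[i / DEMO_BITS_PER_WORD];
		unsigned long mask = 1UL << (i % DEMO_BITS_PER_WORD);

		if (!(*word & mask)) {
			*word |= mask;
			return (int)i;
		}
	}
	return -1;
}

/* Give a minor back, as clear_bit() does on device removal or error. */
static void demo_put_minor(int minor)
{
	demo_bitmap[minor / DEMO_BITS_PER_WORD] &= ~(1UL << (minor % DEMO_BITS_PER_WORD));
}

int main(void)
{
	int a = demo_get_minor();	/* 0 */
	int b = demo_get_minor();	/* 1 */

	demo_put_minor(a);
	printf("second minor: %d, reused minor: %d\n", b, demo_get_minor());
	return 0;
}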
ubu_dev->cdev.owner = THIS_MODULE; + ubu_dev->cdev.ops = &g_uburma_fops; + ubu_dev->cdev.kobj.parent = &ubu_dev->kobj; + (void)kobject_set_name(&ubu_dev->cdev.kobj, "%s", ubc_dev->dev_name); + + /* create /dev/uburma/dev_name> */ + if (cdev_add(&ubu_dev->cdev, base, 1)) + goto free_bit; + + if (uburma_device_create(ubu_dev, ubc_dev) != 0) { + uburma_log_err("device create failed, device:%s.\n", + ubc_dev->dev_name); + goto del_cdev; + } + + return 0; + +del_cdev: + cdev_del(&ubu_dev->cdev); +free_bit: + clear_bit(ubu_dev->devnum, g_dev_bitmap); + return -EPERM; +} + +static int uburma_add_device(struct ubcore_device *ubc_dev) +{ + struct uburma_device *ubu_dev; + int ret; + + uburma_log_info("uburma add dev:%s\n", ubc_dev->dev_name); + + ubu_dev = kzalloc(sizeof(struct uburma_device), GFP_KERNEL); + if (ubu_dev == NULL) + return -ENOMEM; + + uburma_uobj_batch_del_enable(ubc_dev->ops, + ubu_dev->batch_attr.enable_batch_class, + &ubu_dev->batch_attr.is_batch); + ret = init_srcu_struct(&ubu_dev->ubc_dev_srcu); + if (ret != 0) { + kfree(ubu_dev); + return -EPERM; + } + + atomic_set(&ubu_dev->refcnt, 1); + init_completion(&ubu_dev->comp); + + /* cmd cnt and completion for ioctl and mmap cmds */ + atomic_set(&ubu_dev->cmdcnt, 1); + init_completion(&ubu_dev->cmddone); + + kobject_init(&ubu_dev->kobj, &uburma_dev_ktype); + mutex_init(&ubu_dev->uburma_file_list_mutex); + INIT_LIST_HEAD(&ubu_dev->uburma_file_list); + + rcu_assign_pointer(ubu_dev->ubc_dev, ubc_dev); + + if (uburma_cdev_create(ubu_dev, ubc_dev) != 0) { + uburma_log_err("can not create cdev.\n"); + goto err; + } + + ubcore_set_client_ctx_data(ubc_dev, &g_urma_client, ubu_dev); + atomic_inc(&g_dev_cnt); + uburma_log_info("uburma finish add dev:%s\n", ubc_dev->dev_name); + return 0; + +err: + if (atomic_dec_and_test(&ubu_dev->refcnt)) + complete(&ubu_dev->comp); + + wait_for_completion(&ubu_dev->comp); + mutex_destroy(&ubu_dev->uburma_file_list_mutex); + kfree(ubu_dev); + return -EPERM; +} + +static void uburma_free_ucontext(struct uburma_device *ubu_dev, + struct ubcore_device *ubc_dev) +{ + struct uburma_file *file; + + uburma_log_debug("Start uburma free ucontext.\n"); + rcu_assign_pointer(ubu_dev->ubc_dev, NULL); + synchronize_srcu(&ubu_dev->ubc_dev_srcu); + + uburma_log_debug("Rcu synchronize finish.\n"); + mutex_lock(&ubu_dev->uburma_file_list_mutex); + while (list_empty(&ubu_dev->uburma_file_list) == false) { + struct ubcore_ucontext *ucontext; + + file = list_first_entry(&ubu_dev->uburma_file_list, + struct uburma_file, list); + list_del_init(&file->list); + mutex_unlock(&ubu_dev->uburma_file_list_mutex); + + down_write(&file->ucontext_rwsem); + uburma_cleanup_uobjs(file, UBURMA_REMOVE_DRIVER_REMOVE); + ucontext = file->ucontext; + file->ucontext = NULL; + if (ucontext != NULL) { + if (ubc_dev->ops->disassociate_ucontext != NULL) + ubc_dev->ops->disassociate_ucontext(ucontext); + + uburma_log_info("Start ubcore free ucontext.\n"); + ubcore_free_ucontext(ubc_dev, ucontext); + } + + up_write(&file->ucontext_rwsem); + (void)kref_put(&file->ref, uburma_release_file); + + mutex_lock(&ubu_dev->uburma_file_list_mutex); + } + mutex_unlock(&ubu_dev->uburma_file_list_mutex); + mutex_destroy(&ubu_dev->uburma_file_list_mutex); +} + +static void uburma_remove_device(struct ubcore_device *ubc_dev, + void *client_ctx) +{ + struct uburma_device *ubu_dev = client_ctx; + + if (ubu_dev == NULL) + return; + + uburma_device_destroy(ubu_dev); + cdev_del(&ubu_dev->cdev); + clear_bit(ubu_dev->devnum, g_dev_bitmap); + + uburma_free_ucontext(ubu_dev, 
ubc_dev); + + if (atomic_dec_and_test(&ubu_dev->refcnt)) + complete(&ubu_dev->comp); + + if (ubc_dev->ops->disassociate_ucontext == NULL) + wait_for_completion(&ubu_dev->comp); + + /* do not wait_for_completion(&ubu_dev->comp) */ + uburma_cmd_flush(ubu_dev); + kobject_put(&ubu_dev->kobj); + + if (atomic_dec_and_test(&g_dev_cnt)) + complete(&g_dev_flush_comp); + + uburma_log_info("uburma finish rmv dev:%s\n", ubc_dev->dev_name); +} + +/* The driver needs to stay and resolve the memory mapping first, */ +/* and then release the jetty resources. */ +static void uburma_stop_device(struct ubcore_device *ubc_dev, void *client_ctx) +{ + struct uburma_device *ubu_dev = client_ctx; + struct uburma_file *cur_file, *next_file; + + if (ubu_dev == NULL || list_empty(&ubu_dev->uburma_file_list)) + return; + + uburma_log_debug("Start uburma stop device.\n"); + rcu_assign_pointer(ubu_dev->ubc_dev, NULL); + synchronize_srcu(&ubu_dev->ubc_dev_srcu); + uburma_log_debug("Rcu synchronize finish.\n"); + + mutex_lock(&ubu_dev->uburma_file_list_mutex); + list_for_each_entry_safe(cur_file, next_file, + &ubu_dev->uburma_file_list, list) { + mutex_unlock(&ubu_dev->uburma_file_list_mutex); + down_write(&cur_file->ucontext_rwsem); + uburma_unmap_vma_pages(cur_file); + up_write(&cur_file->ucontext_rwsem); + mutex_lock(&ubu_dev->uburma_file_list_mutex); + } + mutex_unlock(&ubu_dev->uburma_file_list_mutex); + uburma_log_info("uburma finish stop dev:%s\n", ubc_dev->dev_name); +} + +static void uburma_register_client(void) +{ + int ret; + + ret = ubcore_register_client(&g_urma_client); + if (ret != 0) + uburma_log_err("register client failed, ret: %d.\n", ret); + else + uburma_log_info("register client succeed.\n"); +} + +static void uburma_unregister_client(void) +{ + ubcore_unregister_client(&g_urma_client); + uburma_log_info("unregister client succeed.\n"); +} + +static int uburma_class_create(void) +{ + int ret; + + ret = alloc_chrdev_region(&g_dynamic_uburma_dev, 0, + UBURMA_DYNAMIC_MINOR_NUM, UBURMA_MODULE_NAME); + if (ret != 0) { + uburma_log_err("couldn't register dynamic device number.\n"); + goto out; + } + + /* create /sys/class/uburma */ + ret = class_register(&g_uburma_class); + if (ret) { + uburma_log_err("couldn't create class %s.\n", + UBURMA_MODULE_NAME); + goto out_chrdev; + } + + return 0; +out_chrdev: + unregister_chrdev_region(g_dynamic_uburma_dev, + UBURMA_DYNAMIC_MINOR_NUM); +out: + return ret; +} + +static void uburma_class_destroy(void) +{ + class_unregister(&g_uburma_class); + unregister_chrdev_region(g_dynamic_uburma_dev, + UBURMA_DYNAMIC_MINOR_NUM); +} + +static int __init uburma_init(void) +{ + int ret; + + ret = uburma_class_create(); + if (ret != 0) { + uburma_log_err("uburma dev create failed.\n"); + return ret; + } + + atomic_set(&g_dev_cnt, 0); + init_completion(&g_dev_flush_comp); + + uburma_register_client(); + + uburma_log_info("uburma module init success.\n"); + return 0; +} + +static void __exit uburma_exit(void) +{ + uburma_unregister_client(); + + // wait all uburma dev unregister + if (atomic_read(&g_dev_cnt) > 0) + wait_for_completion(&g_dev_flush_comp); + + uburma_class_destroy(); + uburma_log_info("uburma module exits.\n"); +} + +module_init(uburma_init); +module_exit(uburma_exit); + +MODULE_DESCRIPTION("Kernel module for urma client"); +MODULE_AUTHOR("huawei"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ub/urma/uburma/uburma_mmap.c b/drivers/ub/urma/uburma/uburma_mmap.c new file mode 100644 index 000000000000..c6a10ea0d4e8 --- /dev/null +++ 
b/drivers/ub/urma/uburma/uburma_mmap.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uburma mmap module + * Author: Wen Chen + * Create: 2024-03-18 + * Note: + * History: 2024-03-18: Create file + */ + +#include +#include + +#include + +#include "uburma_log.h" +#include "uburma_types.h" + +void uburma_umap_priv_init(struct uburma_umap_priv *priv, + struct vm_area_struct *vma) +{ + struct uburma_file *ufile = vma->vm_file->private_data; + + priv->vma = vma; + vma->vm_private_data = priv; + + mutex_lock(&ufile->umap_mutex); + list_add(&priv->node, &ufile->umaps_list); + mutex_unlock(&ufile->umap_mutex); +} + +void uburma_unmap_vma_pages(struct uburma_file *ufile) +{ + struct uburma_umap_priv *priv, *next_priv; + struct mm_struct *mm; + + if (list_empty(&ufile->umaps_list)) + return; + + lockdep_assert_held(&ufile->cleanup_rwsem); + while (1) { + struct list_head local_list; + + INIT_LIST_HEAD(&local_list); + mm = NULL; + mutex_lock(&ufile->umap_mutex); + list_for_each_entry_safe(priv, next_priv, &ufile->umaps_list, + node) { + struct mm_struct *curr_mm = priv->vma->vm_mm; + + if (!mm) { + if (!mmget_not_zero(curr_mm)) { + list_del_init(&priv->node); + continue; + } + mm = curr_mm; + list_move_tail(&priv->node, &local_list); + } else if (curr_mm == mm) { + list_move_tail(&priv->node, &local_list); + } + } + mutex_unlock(&ufile->umap_mutex); + + if (list_empty(&local_list)) { + if (mm) + mmput(mm); + return; + } + + mmap_read_lock(mm); + + list_for_each_entry_safe(priv, next_priv, &local_list, node) { + struct vm_area_struct *vma = priv->vma; + + list_del_init(&priv->node); + if (vma->vm_mm == mm) + zap_vma_ptes(vma, vma->vm_start, + vma->vm_end - vma->vm_start); + } + mmap_read_unlock(mm); + + mmput(mm); + } +} + +static void uburma_umap_open(struct vm_area_struct *vma) +{ + struct uburma_file *ufile = vma->vm_file->private_data; + struct uburma_umap_priv *priv; + + if (!down_read_trylock(&ufile->cleanup_rwsem)) + goto out_zap; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + goto out_unlock; + + uburma_umap_priv_init(priv, vma); + + up_read(&ufile->cleanup_rwsem); + return; + +out_unlock: + up_read(&ufile->cleanup_rwsem); +out_zap: + vma->vm_private_data = NULL; + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void uburma_umap_close(struct vm_area_struct *vma) +{ + struct uburma_file *ufile = vma->vm_file->private_data; + struct uburma_umap_priv *priv = vma->vm_private_data; + + if (!priv) + return; + + mutex_lock(&ufile->umap_mutex); + list_del(&priv->node); + mutex_unlock(&ufile->umap_mutex); + kfree(priv); + vma->vm_private_data = NULL; +} + +static vm_fault_t uburma_umap_fault(struct vm_fault *vmf) +{ + struct uburma_file *ufile = vmf->vma->vm_file->private_data; + struct uburma_umap_priv *priv = vmf->vma->vm_private_data; + struct page *page; + + if (unlikely(!priv)) + return VM_FAULT_SIGBUS; + + if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) { + vmf->page = ZERO_PAGE(0); + 
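/*
 * Illustrative userspace sketch, not part of the patch: the fault handling in
 * uburma_umap_fault() around this point serves every fault on a torn-down
 * mapping from one lazily allocated, shared zero-filled page, so userspace
 * keeps running instead of taking SIGBUS. The pthread/calloc analogue below
 * shows only that idea; struct demo_file and demo_fault() are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096

struct demo_file {
	pthread_mutex_t lock;	/* plays the role of umap_mutex */
	void *fault_page;	/* allocated once, handed out to every fault */
};

static void *demo_fault(struct demo_file *f)
{
	void *page;

	pthread_mutex_lock(&f->lock);
	if (!f->fault_page) {
		/* calloc() stands in for alloc_pages(... | __GFP_ZERO, 0) */
		f->fault_page = calloc(1, DEMO_PAGE_SIZE);
		if (!f->fault_page) {
			pthread_mutex_unlock(&f->lock);
			return NULL;	/* VM_FAULT_SIGBUS analogue */
		}
	}
	page = f->fault_page;
	pthread_mutex_unlock(&f->lock);
	return page;
}

int main(void)
{
	struct demo_file f = { .lock = PTHREAD_MUTEX_INITIALIZER };
	void *a = demo_fault(&f);
	void *b = demo_fault(&f);

	printf("same page served twice: %d\n", a != NULL && a == b);
	free(f.fault_page);
	return 0;
}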
get_page(vmf->page); + return 0; + } + + page = READ_ONCE(ufile->fault_page); + if (likely(page)) { + vmf->page = page; + get_page(vmf->page); + return 0; + } + + mutex_lock(&ufile->umap_mutex); + if (!ufile->fault_page) { + ufile->fault_page = alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0); + if (!ufile->fault_page) { + mutex_unlock(&ufile->umap_mutex); + return VM_FAULT_SIGBUS; + } + } + vmf->page = ufile->fault_page; + get_page(vmf->page); + mutex_unlock(&ufile->umap_mutex); + + return 0; +} + +static const struct vm_operations_struct g_urma_umap_ops = { + .open = uburma_umap_open, + .close = uburma_umap_close, + .fault = uburma_umap_fault, +}; + +const struct vm_operations_struct *uburma_get_umap_ops(void) +{ + return (const struct vm_operations_struct *)&g_urma_umap_ops; +} diff --git a/drivers/ub/urma/uburma/uburma_mmap.h b/drivers/ub/urma/uburma/uburma_mmap.h new file mode 100644 index 000000000000..b4bb1e6e78eb --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_mmap.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uburma mmap head file + * Author: Wen Chen + * Create: 2024-03-18 + * Note: + * History: 2024-03-18: Create file + */ + +#ifndef UBURMA_MMAP_H +#define UBURMA_MMAP_H + +#include + +#include "uburma_types.h" + +void uburma_unmap_vma_pages(struct uburma_file *ufile); +const struct vm_operations_struct *uburma_get_umap_ops(void); +void uburma_umap_priv_init(struct uburma_umap_priv *priv, + struct vm_area_struct *vma); + +#endif /* UBURMA_MMAP_H */ diff --git a/drivers/ub/urma/uburma/uburma_types.h b/drivers/ub/urma/uburma/uburma_types.h new file mode 100644 index 000000000000..bcb81f54a14e --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_types.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: Types definition provided by uburma + * Author: Qian Guoxin + * Create: 2021-8-4 + * Note: + * History: 2021-8-4: Create file + */ + +#ifndef UBURMA_TYPES_H +#define UBURMA_TYPES_H + +#include +#include +#include +#include +#include +#include +#include +#include + +enum uburma_remove_reason { + /* Userspace requested uobject deletion. Call could fail */ + UBURMA_REMOVE_DESTROY, + /* Context deletion. This call should delete the actual object itself */ + UBURMA_REMOVE_CLOSE, + /* Driver is being hot-unplugged. 
This call should delete the actual object itself */ + UBURMA_REMOVE_DRIVER_REMOVE, + /* Context is being cleaned-up, but commit was just completed */ + UBURMA_REMOVE_DURING_CLEANUP +}; + +struct uburma_mn { + struct mmu_notifier mn; + struct mm_struct *mm; +}; + +struct uburma_file { + struct kref ref; + struct rw_semaphore ucontext_rwsem; + + struct uburma_device *ubu_dev; + struct ubcore_ucontext *ucontext; + + /* uobj */ + struct mutex uobjects_lock; + struct list_head uobjects; + struct idr idr; + spinlock_t idr_lock; + struct rw_semaphore cleanup_rwsem; + enum uburma_remove_reason cleanup_reason; + + struct list_head list; + struct mutex umap_mutex; + struct list_head umaps_list; + struct page *fault_page; + struct uburma_mn ub_mn; +}; + +enum BATCH_DELETE_ID { + BATCH_DELETE_JETTY, + BATCH_DELETE_JFS, + BATCH_DELETE_JFR, + BATCH_DELETE_JFC, + BATCH_DELETE_NUM +}; + +struct uburma_uobj_batch_attr { + bool enable_batch_class[BATCH_DELETE_NUM]; + bool is_batch; +}; + +struct uburma_device { + atomic_t refcnt; + struct completion comp; /* When refcnt becomes 0, it will wake up */ + atomic_t cmdcnt; /* number of unfinished ioctl and mmap cmds */ + struct completion + cmddone; /* When cmdcnt becomes 0, cmddone will wake up */ + unsigned int devnum; + struct cdev cdev; + struct device *dev; + struct ubcore_device *__rcu ubc_dev; + struct srcu_struct ubc_dev_srcu; /* protect ubc_dev */ + struct kobject kobj; /* when equal to 0 , free uburma_device. */ + struct uburma_uobj_batch_attr batch_attr; + struct mutex uburma_file_list_mutex; /* protect uburma_file_list */ + struct list_head uburma_file_list; +}; + +struct uburma_umap_priv { + struct vm_area_struct *vma; + struct list_head node; +}; + +#endif /* UBURMA_TYPES_H */ diff --git a/drivers/ub/urma/uburma/uburma_uobj.c b/drivers/ub/urma/uburma/uburma_uobj.c new file mode 100644 index 000000000000..68cb7cd20438 --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_uobj.c @@ -0,0 +1,1332 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: uobj framework in uburma + * Author: Chen yujie + * Create: 2022-09-07 + * Note: + * History: 2022-09-07: create file + */ + +#include +#include +#include +#include + +#include +#include + +#include "uburma_types.h" +#include "uburma_file_ops.h" +#include "uburma_log.h" +#include "uburma_event.h" +#include "uburma_mmap.h" +#include "uburma_uobj.h" + +static bool g_is_zero_fd; + +static void uobj_free(struct kref *ref) +{ + kfree_rcu(container_of(ref, struct uburma_uobj, ref), rcu); +} + +struct uburma_uobj *uobj_alloc_begin(const struct uobj_type *type, + struct uburma_file *ufile, int class_id) +{ + struct uburma_uobj *uobj; + + /* Cleanup is running. 
Calling this should have been impossible */ + if (!down_read_trylock(&ufile->cleanup_rwsem)) { + uburma_log_warn( + "uobj: cleanup is running while allocating an uobject\n"); + return ERR_PTR(-EIO); + } + uobj = type->type_class->alloc_begin(type, ufile); + uobj->class_id = class_id; + if (IS_ERR(uobj)) + up_read(&ufile->cleanup_rwsem); + return uobj; +} + +void uobj_alloc_commit(struct uburma_uobj *uobj) +{ + /* relase write lock */ + atomic_set(&uobj->rcnt, 0); + + /* add uobj to list */ + mutex_lock(&uobj->ufile->uobjects_lock); + list_add(&uobj->list, &uobj->ufile->uobjects); + mutex_unlock(&uobj->ufile->uobjects_lock); + + uobj->type->type_class->alloc_commit(uobj); + + up_read(&uobj->ufile->cleanup_rwsem); +} + +void uobj_alloc_abort(struct uburma_uobj *uobj) +{ + uburma_log_debug("uobj allocation is aborted: %s.\n", __func__); + uobj->type->type_class->alloc_abort(uobj); + up_read(&uobj->ufile->cleanup_rwsem); +} + +void uobj_get(struct uburma_uobj *uobj) +{ + kref_get(&uobj->ref); +} + +void uobj_put(struct uburma_uobj *uobj) +{ + kref_put(&uobj->ref, uobj_free); +} + +void uobj_put_batch(struct uburma_uobj **uobj_arr, int arr_num) +{ + int i; + + for (i = 0; i < arr_num; ++i) + kref_put(&uobj_arr[i]->ref, uobj_free); +} + +/* Alloc buffer and init params. */ +static struct uburma_uobj *alloc_uobj(struct uburma_file *ufile, + const struct uobj_type *type) +{ + struct ubcore_device *ubc_dev; + struct uburma_uobj *uobj; + + /* block read and write uobj if we are removing device */ + ubc_dev = srcu_dereference(ufile->ubu_dev->ubc_dev, + &ufile->ubu_dev->ubc_dev_srcu); + if (!ubc_dev) + return ERR_PTR(-EIO); + + uobj = kzalloc(type->obj_size, GFP_KERNEL); + if (uobj == NULL) + return ERR_PTR(-ENOMEM); + + uobj->ufile = ufile; + uobj->type = type; + + atomic_set(&uobj->rcnt, -1); + kref_init(&uobj->ref); + INIT_LIST_HEAD(&uobj->list); + + return uobj; +} + +static int uobj_alloc_idr(struct uburma_uobj *uobj) +{ + int ret; + + idr_preload(GFP_KERNEL); + spin_lock(&uobj->ufile->idr_lock); + + /* Alloc idr pointing to NULL. Will replace it once we commit. */ + ret = idr_alloc(&uobj->ufile->idr, NULL, 1, + min_t(unsigned long, U32_MAX - 1U, INT_MAX), + GFP_NOWAIT); + if (ret >= 0) + uobj->id = ret; + + spin_unlock(&uobj->ufile->idr_lock); + idr_preload_end(); + + return ret < 0 ? ret : 0; +} + +static void uobj_remove_idr(struct uburma_uobj *uobj) +{ + spin_lock(&uobj->ufile->idr_lock); + idr_remove(&uobj->ufile->idr, uobj->id); + spin_unlock(&uobj->ufile->idr_lock); +} + +static int uobj_try_lock(struct uburma_uobj *uobj, bool exclusive) +{ + /* + * When a shared access is required, we use a positive counter. Each + * shared access request checks that the value != -1 and increment it. + * Exclusive access is required for operations like write or destroy. + * In exclusive access mode, we check that the counter is zero (nobody + * claimed this object) and we set it to -1. Releasing a shared access + * lock is done simply by decreasing the counter. As for exclusive + * access locks, since only a single one of them is allowed + * concurrently, setting the counter to zero is enough for releasing + * this lock. + */ + if (!exclusive) + return atomic_add_unless(&uobj->rcnt, 1, -1) ? 0 : -EBUSY; + + /* lock is either WRITE or DESTROY - should be exclusive */ + return atomic_cmpxchg(&uobj->rcnt, 0, -1) == 0 ? 
0 : -EBUSY; +} + +static void uobj_unlock(struct uburma_uobj *uobj, bool exclusive) +{ + /* + * In order to unlock an object, either decrease its rcnt for + * read access or zero it in case of exclusive access. See + * uverbs_try_lock_object for locking schema information. + */ + if (!exclusive) + atomic_dec(&uobj->rcnt); + else + atomic_set(&uobj->rcnt, 0); +} + +static int __must_check uobj_remove_commit_internal( + struct uburma_uobj *uobj, enum uburma_remove_reason why) +{ + struct uburma_file *ufile = uobj->ufile; + int ret; + + ret = uobj->type->type_class->remove_commit(uobj, why); + if (ret && why == UBURMA_REMOVE_DESTROY) { + /* We couldn't remove the object, so just unlock the uobject */ + atomic_set(&uobj->rcnt, 0); + uobj->type->type_class->lookup_put(uobj, UOBJ_ACCESS_NOLOCK); + } else if (!list_empty(&uobj->list)) { + mutex_lock(&ufile->uobjects_lock); + list_del_init(&uobj->list); + mutex_unlock(&ufile->uobjects_lock); + /* put the ref we took when we created the object */ + uobj_put(uobj); + } + + return ret; +} + +static int __must_check +uobj_remove_commit_internal_batch(struct uburma_uobj **uobj_arr, int arr_num, + int *bad_index, enum uburma_remove_reason why) +{ + struct uburma_file *ufile = uobj_arr[0]->ufile; + struct uburma_uobj *uobj = uobj_arr[0]; + int bad_uobj_index; + int end_index; + int ret; + int i; + + ret = uobj->type->type_class->remove_commit_ex(uobj_arr, arr_num, + bad_index, why); + bad_uobj_index = *bad_index; + if (ret == -EINVAL || ret == -EBUSY) + bad_uobj_index = 0; + + if (ret && why == UBURMA_REMOVE_DESTROY) { + for (i = bad_uobj_index; i < arr_num; ++i) { + uobj = uobj_arr[i]; + atomic_set(&uobj->rcnt, 0); + uobj->type->type_class->lookup_put(uobj, + UOBJ_ACCESS_NOLOCK); + } + end_index = bad_uobj_index; + } else { + end_index = arr_num; + } + + for (i = 0; i < end_index; ++i) { + uobj = uobj_arr[i]; + if (!list_empty(&uobj->list)) { + mutex_lock(&ufile->uobjects_lock); + list_del_init(&uobj->list); + mutex_unlock(&ufile->uobjects_lock); + /* put the ref we took when we created the object */ + uobj_put(uobj); + } + } + + return ret; +} + +static int uobj_cg_try_charge(struct uburma_uobj *uobj) +{ + return ubcore_cgroup_try_charge(&uobj->cg_obj, + uobj->ufile->ucontext->ub_dev, + UBCORE_RESOURCE_HCA_OBJECT); +} + +static void uboj_cg_uncharge(struct uburma_uobj *uobj) +{ + ubcore_cgroup_uncharge(&uobj->cg_obj, uobj->ufile->ucontext->ub_dev, + UBCORE_RESOURCE_HCA_OBJECT); +} + +static struct uburma_uobj *uobj_idr_alloc_begin(const struct uobj_type *type, + struct uburma_file *ufile) +{ + struct uburma_uobj *uobj; + int ret; + + uobj = alloc_uobj(ufile, type); + if (IS_ERR(uobj)) + return uobj; + + ret = uobj_alloc_idr(uobj); + if (ret) + goto put_obj; + + ret = uobj_cg_try_charge(uobj); + if (ret != 0) { + uburma_log_warn("cgroup charge failed"); + goto remove; + } + return uobj; + +remove: + uobj_remove_idr(uobj); +put_obj: + uobj_put(uobj); + return ERR_PTR(ret); +} + +static void uobj_idr_alloc_commit(struct uburma_uobj *uobj) +{ + spin_lock(&uobj->ufile->idr_lock); + WARN_ON(idr_replace(&uobj->ufile->idr, uobj, uobj->id)); + spin_unlock(&uobj->ufile->idr_lock); +} + +static void uobj_idr_alloc_abort(struct uburma_uobj *uobj) +{ + uboj_cg_uncharge(uobj); + uobj_remove_idr(uobj); + uobj_put(uobj); +} + +static struct uburma_uobj *uobj_idr_lookup_get(const struct uobj_type *type, + struct uburma_file *ufile, + int id, enum uobj_access flag) +{ + struct uburma_uobj *uobj = NULL; + + rcu_read_lock(); + /* Object won't be released as we're 
protected in rcu. */ + uobj = idr_find(&ufile->idr, id); + if (uobj == NULL) { + uobj = ERR_PTR(-ENOENT); + goto free; + } + + /* Object associated with uobj may have been released. */ + if (!kref_get_unless_zero(&uobj->ref)) + uobj = ERR_PTR(-ENOENT); + +free: + rcu_read_unlock(); + return uobj; +} + +static void uobj_idr_lookup_put(struct uburma_uobj *uobj, enum uobj_access flag) +{ + /* Empty for now. */ +} + +static int __must_check uobj_idr_remove_commit(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + const struct uobj_idr_type *idr_type = + container_of(uobj->type, struct uobj_idr_type, type); + /* Call object destroy function. */ + int ret = idr_type->destroy_func(uobj, why); + + /* Only user req destroy may fail. */ + if (why == UBURMA_REMOVE_DESTROY && ret) + return ret; + + uboj_cg_uncharge(uobj); + uobj_remove_idr(uobj); + return ret; +} + +static int __must_check +uobj_idr_remove_commit_batch(struct uburma_uobj **uobj_arr, int arr_num, + int *bad_index, enum uburma_remove_reason why) +{ + const struct uobj_idr_ex_type *idr_type = + container_of(uobj_arr[0]->type, struct uobj_idr_ex_type, type); + struct uburma_uobj *uobj = NULL; + int ret; + int i; + + /* Call object destroy function. */ + ret = idr_type->destroy_batch_func(uobj_arr, arr_num, bad_index, why); + /* Only user req destroy may fail. */ + if (why == UBURMA_REMOVE_DESTROY && ret) + return ret; + + for (i = 0; i < arr_num; ++i) { + uobj = uobj_arr[i]; + uboj_cg_uncharge(uobj); + uobj_remove_idr(uobj); + } + + return ret; +} + +static struct uburma_uobj *uobj_fd_alloc_begin(const struct uobj_type *type, + struct uburma_file *ufile) +{ + const struct uobj_fd_type *fd_type = + container_of(type, struct uobj_fd_type, type); + struct uburma_uobj *uobj; + struct file *filp; + int new_fd; + + new_fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); + if (new_fd < 0) + return ERR_PTR(new_fd); + + if (new_fd == 0) { + new_fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); + if (new_fd < 0) + return ERR_PTR(new_fd); + g_is_zero_fd = true; + } + + uobj = alloc_uobj(ufile, type); + if (IS_ERR(uobj)) { + put_unused_fd(new_fd); + return uobj; + } + + filp = anon_inode_getfile(fd_type->name, fd_type->fops, uobj, + fd_type->flags); + if (IS_ERR(filp)) { + put_unused_fd(new_fd); + uobj_put(uobj); + return (void *)filp; + } + + uobj->id = new_fd; + uobj->object = filp; + + kref_get(&ufile->ref); + + return uobj; +} + +static void uobj_fd_alloc_commit(struct uburma_uobj *uobj) +{ + struct file *filp = (struct file *)uobj->object; + + /* Do not set uobj->id = 0 as it may be read when remove uobj */ + + /* Get another reference as we export this to the fops */ + uobj_get(uobj); + + fd_install(uobj->id, filp); +} + +static void uobj_fd_alloc_abort(struct uburma_uobj *uobj) +{ + struct file *filp = uobj->object; + + /* Unsuccessful NEW */ + fput(filp); + put_unused_fd(uobj->id); +} + +static struct uburma_uobj *uobj_fd_lookup_get(const struct uobj_type *type, + struct uburma_file *ufile, int id, + enum uobj_access flag) +{ + const struct uobj_fd_type *fd_type = + container_of(type, struct uobj_fd_type, type); + struct uburma_uobj *uobj; + struct file *f; + + if (flag != UOBJ_ACCESS_READ) + return ERR_PTR(-EOPNOTSUPP); + + f = fget(id); + if (f == NULL) + return ERR_PTR(-EBADF); + + uobj = f->private_data; + /* + * fget(id) ensures we are not currently running close_fd, + * and the caller is expected to ensure that close_fd is never + * done while a call top lookup is possible. 
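+	 * The uobj_get() below pairs with the uobj_put() performed in
+	 * uobj_lookup_put() (or in the error path of uobj_lookup_get()),
+	 * so the uobject cannot be freed while the lookup reference is held.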
+ */ + if (f->f_op != fd_type->fops) { + fput(f); + return ERR_PTR(-EBADF); + } + + uobj_get(uobj); + return uobj; +} + +static void uobj_fd_lookup_put(struct uburma_uobj *uobj, enum uobj_access flag) +{ + struct file *filp = uobj->object; + + WARN_ON(flag != UOBJ_ACCESS_READ); + /* This indirectly calls close_fd and free the object */ + fput(filp); +} + +static int __must_check uobj_fd_remove_commit(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + const struct uobj_fd_type *fd_type = + container_of(uobj->type, struct uobj_fd_type, type); + /* Call user close function. */ + int ret = fd_type->context_closed(uobj, why); + + if (why == UBURMA_REMOVE_DESTROY && ret) + return ret; + + if (why == UBURMA_REMOVE_DURING_CLEANUP) { + uobj_fd_alloc_abort(uobj); + return ret; + } + + return ret; +} + +void uburma_uobj_batch_del_enable(struct ubcore_ops *ops, + bool *enable_batch_class, bool *is_batch) +{ + if (ops->destroy_jetty_batch != NULL) { + uburma_log_debug("destroy jetty batch exists\n"); + enable_batch_class[BATCH_DELETE_JETTY] = true; + *is_batch = true; + } + + if (ops->destroy_jfs_batch != NULL) { + uburma_log_debug("destroy jfs batch exists\n"); + enable_batch_class[BATCH_DELETE_JFS] = true; + *is_batch = true; + } + + if (ops->destroy_jfr_batch != NULL) { + uburma_log_debug("destroy jfr batch exists\n"); + enable_batch_class[BATCH_DELETE_JFR] = true; + *is_batch = true; + } + + if (ops->destroy_jfc_batch != NULL) { + uburma_log_debug("destroy jfc batch exists\n"); + enable_batch_class[BATCH_DELETE_JFC] = true; + *is_batch = true; + } +} + +struct uburma_uobj *uobj_lookup_get(const struct uobj_type *type, + struct uburma_file *ufile, int id, + enum uobj_access flag) +{ + struct ubcore_device *ubc_dev; + struct uburma_uobj *uobj; + int ret; + + uobj = type->type_class->lookup_get(type, ufile, id, flag); + if (IS_ERR(uobj)) + return uobj; + + if (uobj->type != type) { + ret = -EINVAL; + goto free; + } + + /* block read and write uobj if we are removing device */ + ubc_dev = srcu_dereference(ufile->ubu_dev->ubc_dev, + &ufile->ubu_dev->ubc_dev_srcu); + if (!ubc_dev) { + ret = -EIO; + goto free; + } + + if (flag == UOBJ_ACCESS_NOLOCK) + return uobj; + + ret = uobj_try_lock(uobj, flag == UOBJ_ACCESS_WRITE); + if (ret) { + WARN(ufile->cleanup_reason, + "uburma: Trying to lookup_get while cleanup context\n"); + goto free; + } + + return uobj; +free: + uobj->type->type_class->lookup_put(uobj, flag); + /* pair with uobj_get in uobj_fd_lookup_get */ + uobj_put(uobj); + return ERR_PTR(ret); +} + +void uobj_lookup_put(struct uburma_uobj *uobj, enum uobj_access flag) +{ + uobj->type->type_class->lookup_put(uobj, flag); + + if (flag != UOBJ_ACCESS_NOLOCK) + uobj_unlock( + uobj, + flag == UOBJ_ACCESS_WRITE); /* match with uobj_try_lock */ + + uobj_put(uobj); +} + +int __must_check uobj_remove_commit(struct uburma_uobj *uobj) +{ + struct uburma_file *ufile = uobj->ufile; + int ret; + + down_read(&ufile->cleanup_rwsem); + /* try Lock uobj for write with cleanup_rwsem locked */ + ret = uobj_try_lock(uobj, true); + if (ret) { + up_read(&ufile->cleanup_rwsem); + uburma_log_warn("Failed to lock uobj\n"); + return ret; + } + + ret = uobj_remove_commit_internal(uobj, UBURMA_REMOVE_DESTROY); + up_read(&ufile->cleanup_rwsem); + return ret; +} + +int __must_check uobj_remove_commit_batch(struct uburma_uobj **uobj_arr, + int arr_num, int *bad_index) +{ + struct uburma_file *ufile = NULL; + struct uburma_uobj *uobj = NULL; + int ret; + int i; + + if (arr_num > 0) { + uobj = uobj_arr[0]; + ufile = 
uobj->ufile; + down_read(&ufile->cleanup_rwsem); + } + for (i = 0; i < arr_num; ++i) { + uobj = uobj_arr[i]; + ufile = uobj->ufile; + /* try Lock uobj for write with cleanup_rwsem locked */ + ret = uobj_try_lock(uobj, true); + if (ret) { + up_read(&ufile->cleanup_rwsem); + uburma_log_warn("Failed to lock uobj\n"); + return ret; + } + } + + ret = uobj_remove_commit_internal_batch(uobj_arr, arr_num, bad_index, + UBURMA_REMOVE_DESTROY); + + if (arr_num > 0) + up_read(&ufile->cleanup_rwsem); + + return ret; +} + +void uburma_init_uobj_context(struct uburma_file *ufile) +{ + g_is_zero_fd = false; + ufile->cleanup_reason = 0; + idr_init(&ufile->idr); + spin_lock_init(&ufile->idr_lock); + INIT_LIST_HEAD(&ufile->uobjects); + mutex_init(&ufile->uobjects_lock); + init_rwsem(&ufile->cleanup_rwsem); +} + +static inline void do_clean_uobj(struct uburma_uobj *obj, + unsigned int cur_order, + enum uburma_remove_reason why) +{ + int ret; + /* if we hit this WARN_ON, + * that means we are racing with a lookup_get. + */ + WARN_ON(uobj_try_lock(obj, true)); + ret = obj->type->type_class->remove_commit(obj, why); + if (ret) + pr_warn("uburma: failed to remove uobject id %d order %u\n", + obj->id, cur_order); + + list_del_init(&obj->list); + + /* uburma_close_uobj_fd will also try lock the uobj for write */ + if (uobj_type_is_fd(obj)) + uobj_unlock(obj, true); /* match with uobj_try_lock */ + + /* put the ref we took when we created the object */ + uobj_put(obj); +} + +static void do_clean_uobj_batch(struct uburma_uobj **obj_arr, int arr_num, + unsigned int cur_order, + enum uburma_remove_reason why) +{ + struct uburma_uobj *obj = NULL; + int bad_index = 0; + int ret; + int i; + + for (i = 0; i < arr_num; ++i) + WARN_ON(uobj_try_lock(obj_arr[i], true)); + ret = obj_arr[0]->type->type_class->remove_commit_ex(obj_arr, arr_num, + &bad_index, why); + if (ret) + pr_warn("[batch]uburma: failed to remove uobject id %d order %u\n", + obj_arr[0]->id, cur_order); + for (i = 0; i < arr_num; ++i) { + obj = obj_arr[i]; + list_del_init(&obj->list); + /* uburma_close_uobj_fd will also try lock the uobj for write */ + if (uobj_type_is_fd(obj)) + uobj_unlock(obj, true); /* match with uobj_try_lock */ + /* put the ref we took when we created the object */ + uobj_put(obj); + } +} + +void uburma_cleanup_uobjs(struct uburma_file *ufile, + enum uburma_remove_reason why) +{ + /* ubcore_device is forbidden to be dereferenced from uburma_device */ + struct uburma_device *ubu_dev = ufile->ubu_dev; + struct uburma_uobj **obj_arr[BATCH_DELETE_NUM]; + int arr_num[BATCH_DELETE_NUM] = { 0 }; + struct uburma_uobj *obj, *next_obj; + unsigned int cur_order = 0; + unsigned int next_order; + int class_id; + int len = 0; + int i; + + ufile->cleanup_reason = why; + down_write(&ufile->cleanup_rwsem); + + if (why == UBURMA_REMOVE_DRIVER_REMOVE) + uburma_unmap_vma_pages(ufile); + + if (ubu_dev->batch_attr.is_batch) { + mutex_lock(&ufile->uobjects_lock); + list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, + list) { + len++; + } + mutex_unlock(&ufile->uobjects_lock); + + for (i = 0; i < BATCH_DELETE_NUM; ++i) { + obj_arr[i] = kcalloc(len, sizeof(struct uburma_uobj *), + GFP_KERNEL); + if (obj_arr[i] == NULL) + ubu_dev->batch_attr.enable_batch_class[i] = + false; + } + } + + while (!list_empty(&ufile->uobjects)) { + next_order = UINT_MAX; + mutex_lock(&ufile->uobjects_lock); + list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, + list) { + if (obj->type->destroy_order == cur_order) { + class_id = obj->class_id; + if (class_id == 
UOBJ_CLASS_JETTY && + ubu_dev->batch_attr.enable_batch_class + [BATCH_DELETE_JETTY]) { + obj_arr[BATCH_DELETE_JETTY] + [arr_num[BATCH_DELETE_JETTY]] = + obj; + arr_num[BATCH_DELETE_JETTY]++; + continue; + } else if (class_id == UOBJ_CLASS_JFS && + ubu_dev->batch_attr.enable_batch_class + [BATCH_DELETE_JFS]) { + obj_arr[BATCH_DELETE_JFS] + [arr_num[BATCH_DELETE_JFS]] = + obj; + arr_num[BATCH_DELETE_JFS]++; + continue; + } else if (class_id == UOBJ_CLASS_JFR && + ubu_dev->batch_attr.enable_batch_class + [BATCH_DELETE_JFR]) { + obj_arr[BATCH_DELETE_JFR] + [arr_num[BATCH_DELETE_JFR]] = + obj; + arr_num[BATCH_DELETE_JFR]++; + continue; + } else if (class_id == UOBJ_CLASS_JFC && + ubu_dev->batch_attr.enable_batch_class + [BATCH_DELETE_JFC]) { + obj_arr[BATCH_DELETE_JFC] + [arr_num[BATCH_DELETE_JFC]] = + obj; + arr_num[BATCH_DELETE_JFC]++; + continue; + } else { + do_clean_uobj(obj, cur_order, why); + } + } else + next_order = min(next_order, + obj->type->destroy_order); + } + + for (i = 0; i < BATCH_DELETE_NUM; ++i) { + if (arr_num[i] != 0) { + do_clean_uobj_batch(obj_arr[i], arr_num[i], + cur_order, why); + arr_num[i] = 0; + } + } + + mutex_unlock(&ufile->uobjects_lock); + cur_order = next_order; + } + + for (i = 0; i < BATCH_DELETE_NUM; ++i) { + if (ubu_dev->batch_attr.enable_batch_class[i]) + kfree(obj_arr[i]); + } + + if (g_is_zero_fd == true) { + put_unused_fd(0); + g_is_zero_fd = false; + } + + up_write(&ufile->cleanup_rwsem); +} + +static int uburma_free_token_id(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + return ubcore_free_token_id((struct ubcore_token_id *)uobj->object); +} + +static int uburma_free_seg(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + return ubcore_unregister_seg((struct ubcore_target_seg *)uobj->object); +} + +static int uburma_free_jfc(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + struct uburma_jfc_uobj *jfc_uobj = + container_of(uobj, struct uburma_jfc_uobj, uobj); + struct ubcore_jfc *jfc = (struct ubcore_jfc *)uobj->object; + struct uburma_jfce_uobj *jfce_uobj; + uint32_t jfc_id = jfc->id; + int ret; + + ret = ubcore_delete_jfc(jfc); + if (ret) + return ret; + + if (!IS_ERR(jfc_uobj->jfce)) { + jfce_uobj = container_of(jfc_uobj->jfce, + struct uburma_jfce_uobj, uobj); + uburma_release_comp_event(jfce_uobj, + &jfc_uobj->comp_event_list); + uobj_put(jfc_uobj->jfce); + } + + uburma_release_async_event(uobj->ufile, &jfc_uobj->async_event_list); + uburma_log_info("Finish to delete jfc: %u.\n", jfc_id); + return ret; +} + +static int uburma_free_jfc_batch(struct uburma_uobj **uobj_arr, int arr_num, + int *bad_jfc_index, + enum uburma_remove_reason why) +{ + struct uburma_jfc_uobj **jfc_uobj_arr = NULL; + struct uburma_jfce_uobj *jfce_uobj = NULL; + struct uburma_jfc_uobj *jfc_uobj = NULL; + struct ubcore_jfc **jfc_arr = NULL; + struct uburma_uobj *uobj = NULL; + int end_index; + int ret; + int i; + + jfc_uobj_arr = + kcalloc(arr_num, sizeof(struct uburma_jfc_uobj *), GFP_KERNEL); + if (jfc_uobj_arr == NULL) + return -ENOMEM; + jfc_arr = kcalloc(arr_num, sizeof(struct ubcore_jfc *), GFP_KERNEL); + if (jfc_arr == NULL) { + kfree(jfc_uobj_arr); + return -ENOMEM; + } + + for (i = 0; i < arr_num; ++i) { + uobj = uobj_arr[i]; + jfc_uobj_arr[i] = + container_of(uobj, struct uburma_jfc_uobj, uobj); + jfc_arr[i] = (struct ubcore_jfc *)uobj->object; + uburma_log_info("Delete jfc batch, jfc_id: %u, index: %u.\n", jfc_arr[i]->id, i); + } + + ret = ubcore_delete_jfc_batch(jfc_arr, arr_num, bad_jfc_index); + if (ret) + end_index = 
*bad_jfc_index; + else + end_index = arr_num; + + uburma_log_info("Delete jfc batch, ret: %d, bad_jfc_index: %d.\n", ret, *bad_jfc_index); + if (ret != -EINVAL && ret != -EBUSY) { + for (i = 0; i < end_index; ++i) { + jfc_uobj = jfc_uobj_arr[i]; + uobj = uobj_arr[i]; + if (!IS_ERR(jfc_uobj->jfce)) { + jfce_uobj = container_of( + jfc_uobj->jfce, struct uburma_jfce_uobj, + uobj); + uburma_release_comp_event( + jfce_uobj, &jfc_uobj->comp_event_list); + uobj_put(jfc_uobj->jfce); + } + uburma_release_async_event(uobj->ufile, + &jfc_uobj->async_event_list); + } + } + + kfree(jfc_uobj_arr); + kfree(jfc_arr); + return ret; +} + +static int uburma_free_jfs(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + struct uburma_jfs_uobj *jfs_uobj = + container_of(uobj, struct uburma_jfs_uobj, uobj); + int ret; + + ret = ubcore_delete_jfs((struct ubcore_jfs *)uobj->object); + if (ret) + return ret; + + uburma_release_async_event(uobj->ufile, &jfs_uobj->async_event_list); + return ret; +} + +static int uburma_free_jfs_batch(struct uburma_uobj **uobj_arr, int arr_num, + int *bad_jfs_index, + enum uburma_remove_reason why) +{ + struct uburma_jfs_uobj **jfs_uobj_arr = NULL; + struct uburma_jfs_uobj *jfs_uobj = NULL; + struct ubcore_jfs **jfs_arr = NULL; + struct uburma_uobj *uobj = NULL; + int end_index; + int ret; + int i; + + jfs_uobj_arr = + kcalloc(arr_num, sizeof(struct uburma_jfs_uobj *), GFP_KERNEL); + if (jfs_uobj_arr == NULL) + return -ENOMEM; + jfs_arr = kcalloc(arr_num, sizeof(struct ubcore_jfs *), GFP_KERNEL); + if (jfs_arr == NULL) { + kfree(jfs_uobj_arr); + return -ENOMEM; + } + + for (i = 0; i < arr_num; ++i) { + uobj = uobj_arr[i]; + jfs_uobj_arr[i] = + container_of(uobj, struct uburma_jfs_uobj, uobj); + jfs_arr[i] = (struct ubcore_jfs *)uobj->object; + } + + ret = ubcore_delete_jfs_batch(jfs_arr, arr_num, bad_jfs_index); + if (ret) + end_index = *bad_jfs_index; + else + end_index = arr_num; + + if (ret != -EINVAL && ret != -EBUSY) { + for (i = 0; i < end_index; ++i) { + jfs_uobj = jfs_uobj_arr[i]; + uobj = uobj_arr[i]; + uburma_release_async_event(uobj->ufile, + &jfs_uobj->async_event_list); + } + } + + kfree(jfs_uobj_arr); + kfree(jfs_arr); + return ret; +} + +static int uburma_free_jfr(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + struct uburma_jfr_uobj *jfr_uobj = + container_of(uobj, struct uburma_jfr_uobj, uobj); + int ret; + + ret = ubcore_delete_jfr((struct ubcore_jfr *)uobj->object); + if (ret) + return ret; + + uburma_release_async_event(uobj->ufile, &jfr_uobj->async_event_list); + return ret; +} + +static int uburma_free_jfr_batch(struct uburma_uobj **uobj_arr, int arr_num, + int *bad_jfr_index, + enum uburma_remove_reason why) +{ + struct uburma_jfr_uobj **jfr_uobj_arr = NULL; + struct uburma_jfr_uobj *jfr_uobj = NULL; + struct ubcore_jfr **jfr_arr = NULL; + struct uburma_uobj *uobj = NULL; + int end_index; + int ret; + int i; + + jfr_uobj_arr = + kcalloc(arr_num, sizeof(struct uburma_jfr_uobj *), GFP_KERNEL); + if (jfr_uobj_arr == NULL) + return -ENOMEM; + jfr_arr = kcalloc(arr_num, sizeof(struct ubcore_jfr *), GFP_KERNEL); + if (jfr_arr == NULL) { + kfree(jfr_uobj_arr); + return -ENOMEM; + } + + for (i = 0; i < arr_num; ++i) { + uobj = uobj_arr[i]; + jfr_uobj_arr[i] = + container_of(uobj, struct uburma_jfr_uobj, uobj); + jfr_arr[i] = (struct ubcore_jfr *)uobj->object; + } + + ret = ubcore_delete_jfr_batch(jfr_arr, arr_num, bad_jfr_index); + if (ret) + end_index = *bad_jfr_index; + else + end_index = arr_num; + + if (ret != -EINVAL && ret != -EBUSY) { 
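+		/* ret was neither -EINVAL nor -EBUSY, so the driver did destroy
+		 * jfr_arr[0 .. end_index - 1]; release the async events still
+		 * queued on those uobjects.
+		 */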
+ for (i = 0; i < end_index; ++i) { + jfr_uobj = jfr_uobj_arr[i]; + uobj = uobj_arr[i]; + uburma_release_async_event(uobj->ufile, + &jfr_uobj->async_event_list); + } + } + + kfree(jfr_uobj_arr); + kfree(jfr_arr); + return ret; +} + +static int uburma_free_jetty(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + struct uburma_jetty_uobj *jetty_uobj = + container_of(uobj, struct uburma_jetty_uobj, uobj); + int ret; + + ret = ubcore_delete_jetty((struct ubcore_jetty *)uobj->object); + if (ret) + return ret; + + uburma_release_async_event(uobj->ufile, &jetty_uobj->async_event_list); + return ret; +} + +static int uburma_free_jetty_batch(struct uburma_uobj **uobj_arr, int arr_num, + int *bad_jetty_index, + enum uburma_remove_reason why) +{ + struct uburma_jetty_uobj **jetty_uobj_arr = NULL; + struct uburma_jetty_uobj *jetty_uobj = NULL; + struct ubcore_jetty **jetty_arr = NULL; + struct uburma_uobj *uobj = NULL; + int end_index; + int ret; + int i; + + jetty_uobj_arr = kcalloc(arr_num, sizeof(struct uburma_jetty_uobj *), + GFP_KERNEL); + if (jetty_uobj_arr == NULL) + return -ENOMEM; + jetty_arr = kcalloc(arr_num, sizeof(struct ubcore_jetty *), GFP_KERNEL); + if (jetty_arr == NULL) { + kfree(jetty_uobj_arr); + return -ENOMEM; + } + + for (i = 0; i < arr_num; ++i) { + uobj = uobj_arr[i]; + jetty_uobj_arr[i] = + container_of(uobj, struct uburma_jetty_uobj, uobj); + jetty_arr[i] = (struct ubcore_jetty *)uobj->object; + } + + ret = ubcore_delete_jetty_batch(jetty_arr, arr_num, bad_jetty_index); + if (ret) + end_index = *bad_jetty_index; + else + end_index = arr_num; + + if (ret != -EINVAL && ret != -EBUSY) { + for (i = 0; i < end_index; ++i) { + jetty_uobj = jetty_uobj_arr[i]; + uobj = uobj_arr[i]; + uburma_release_async_event( + uobj->ufile, &jetty_uobj->async_event_list); + } + } + + kfree(jetty_uobj_arr); + kfree(jetty_arr); + return ret; +} + +static int uburma_free_jetty_grp(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + struct uburma_jetty_grp_uobj *jetty_grp_uobj = + container_of(uobj, struct uburma_jetty_grp_uobj, uobj); + int ret; + + ret = ubcore_delete_jetty_grp( + (struct ubcore_jetty_group *)uobj->object); + if (ret) + return ret; + + uburma_release_async_event(uobj->ufile, + &jetty_grp_uobj->async_event_list); + return ret; +} + +static int uburma_free_tjfr(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + return ubcore_unimport_jfr((struct ubcore_tjetty *)uobj->object); +} + +static void uburma_free_tjetty_refcnt(struct uburma_tjetty_uobj *uburma_tjetty) +{ + struct ubcore_jetty *jetty; + + jetty = (struct ubcore_jetty *)uburma_tjetty->jetty_uobj->uobj.object; + if (jetty->remote_jetty != NULL) { + atomic_set(&jetty->remote_jetty->use_cnt, 0); + jetty->remote_jetty = NULL; + } +} + +static int uburma_free_tjetty(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + struct uburma_tjetty_uobj *uburma_tjetty; + + uburma_tjetty = (struct uburma_tjetty_uobj *)uobj; + if (uburma_tjetty->jetty_uobj != NULL) { + (void)ubcore_unbind_jetty( + uburma_tjetty->jetty_uobj->uobj.object); + /* When resetting the network card, socket messages cannot be sent. + * Unbind jetty cannot succeed. The tjetty reference needs to be released, + * otherwise unimport jetty cannot succeed. 
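+	 * uburma_free_tjetty_refcnt() below clears jetty->remote_jetty and
+	 * resets its use count so that the unimport further down is not
+	 * blocked by the stale binding.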
+ */ + uburma_free_tjetty_refcnt(uburma_tjetty); + uburma_tjetty->jetty_uobj = NULL; + uburma_log_warn_rl( + "unbind_jetty hasn't been done and it has been handled, why: %d.\n", + (int)why); + } + if (uobj->object == NULL) + return 0; + else if (uburma_tjetty->should_unimport_async) + return ubcore_unimport_jetty_async( + (struct ubcore_tjetty *)uobj->object, 0, NULL); + else + return ubcore_unimport_jetty( + (struct ubcore_tjetty *)uobj->object); +} + +static int uburma_free_tseg(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + return ubcore_unimport_seg((struct ubcore_target_seg *)uobj->object); +} + +void uburma_close_uobj_fd(struct file *f) +{ + struct uburma_uobj *uobj = f->private_data; + struct uburma_file *ufile = uobj->ufile; + int ret; + + if (down_read_trylock(&ufile->cleanup_rwsem)) { + /* + * uobj_fd_lookup_get holds the kref on the struct file any + * time a FD uobj is locked, which prevents this release + * method from being invoked. Meaning we can always get the + * write lock here, or we have a kernel bug. + */ + WARN_ON(uobj_try_lock(uobj, true)); + ret = uobj_remove_commit_internal(uobj, UBURMA_REMOVE_CLOSE); + up_read(&ufile->cleanup_rwsem); + if (ret) + pr_warn("uburma: unable to clean up uobj file.\n"); + } + + /* Pairs with filp->private_data in alloc_begin_fd_uobject */ + uobj_put(uobj); +} + +static int uburma_hot_unplug_jfce(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + struct uburma_jfce_uobj *jfce = + container_of(uobj, struct uburma_jfce_uobj, uobj); + struct uburma_jfe *jfe = &jfce->jfe; + + spin_lock_irq(&jfe->lock); + if (jfe->deleting == true) { + spin_unlock_irq(&jfe->lock); + return 0; + } + jfe->deleting = true; + spin_unlock_irq(&jfe->lock); + + if (why == UBURMA_REMOVE_DRIVER_REMOVE) + wake_up_interruptible(&jfe->poll_wait); + + uburma_uninit_jfe(jfe); + return 0; +} + +static int uburma_hot_unplug_jfae(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + struct uburma_jfae_uobj *jfae = + container_of(uobj, struct uburma_jfae_uobj, uobj); + struct uburma_jfe *jfe = &jfae->jfe; + + spin_lock_irq(&jfe->lock); + if (jfe->deleting == true) { + spin_unlock_irq(&jfe->lock); + return 0; + } + spin_unlock_irq(&jfe->lock); + + if (why == UBURMA_REMOVE_DRIVER_REMOVE) + uburma_write_event(&jfae->jfe, 0, UBCORE_EVENT_DEV_FATAL, NULL, + NULL); + + spin_lock_irq(&jfe->lock); + jfe->deleting = true; + ubcore_unregister_event_handler(jfae->dev, &jfae->event_handler); + spin_unlock_irq(&jfe->lock); + + return 0; +} + +static int uburma_hot_unplug_notifier(struct uburma_uobj *uobj, + enum uburma_remove_reason why) +{ + struct uburma_notifier_uobj *notifier = + container_of(uobj, struct uburma_notifier_uobj, uobj); + struct uburma_jfe *jfe = ¬ifier->jfe; + + spin_lock_irq(&jfe->lock); + if (jfe->deleting == true) { + spin_unlock_irq(&jfe->lock); + return 0; + } + jfe->deleting = true; + spin_unlock_irq(&jfe->lock); + + if (why == UBURMA_REMOVE_DRIVER_REMOVE) + wake_up_interruptible(&jfe->poll_wait); + + uburma_uninit_jfe(jfe); + return 0; +} + +const struct uobj_type_class uobj_idr_type_class = { + .alloc_begin = uobj_idr_alloc_begin, + .alloc_commit = uobj_idr_alloc_commit, + .alloc_abort = uobj_idr_alloc_abort, + .lookup_get = uobj_idr_lookup_get, + .lookup_put = uobj_idr_lookup_put, + .remove_commit = uobj_idr_remove_commit, + .remove_commit_ex = uobj_idr_remove_commit_batch, +}; + +const struct uobj_type_class uobj_fd_type_class = { + .alloc_begin = uobj_fd_alloc_begin, + .alloc_commit = uobj_fd_alloc_commit, + .alloc_abort 
= uobj_fd_alloc_abort, + .lookup_get = uobj_fd_lookup_get, + .lookup_put = uobj_fd_lookup_put, + .remove_commit = uobj_fd_remove_commit, +}; + +/* The destroy process start from order 0. */ +declare_uobj_class(UOBJ_CLASS_JFCE, + &uobj_type_alloc_fd(3, sizeof(struct uburma_jfce_uobj), + uburma_hot_unplug_jfce, + &uburma_jfce_fops, "[jfce]", + O_RDWR | O_CLOEXEC)); + +declare_uobj_class(UOBJ_CLASS_JFAE, + &uobj_type_alloc_fd(3, sizeof(struct uburma_jfae_uobj), + uburma_hot_unplug_jfae, + &uburma_jfae_fops, "[jfae]", + O_RDWR | O_CLOEXEC)); + +declare_uobj_class(UOBJ_CLASS_NOTIFIER, + &uobj_type_alloc_fd(2, sizeof(struct uburma_notifier_uobj), + uburma_hot_unplug_notifier, + &uburma_notifier_fops, "[notifier]", + O_RDWR | O_CLOEXEC)); + +declare_uobj_class(UOBJ_CLASS_JFC, + &uobj_type_alloc_idr_ex(sizeof(struct uburma_jfc_uobj), 2, + uburma_free_jfc, + uburma_free_jfc_batch)); +declare_uobj_class(UOBJ_CLASS_TOKEN, + &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 1, + uburma_free_token_id)); +declare_uobj_class(UOBJ_CLASS_SEG, + &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 1, + uburma_free_seg)); +declare_uobj_class(UOBJ_CLASS_JFS, + &uobj_type_alloc_idr_ex(sizeof(struct uburma_jfs_uobj), 1, + uburma_free_jfs, + uburma_free_jfs_batch)); +declare_uobj_class(UOBJ_CLASS_JFR, + &uobj_type_alloc_idr_ex(sizeof(struct uburma_jfr_uobj), 1, + uburma_free_jfr, + uburma_free_jfr_batch)); +declare_uobj_class(UOBJ_CLASS_JETTY, + &uobj_type_alloc_idr_ex(sizeof(struct uburma_jetty_uobj), 1, + uburma_free_jetty, + uburma_free_jetty_batch)); +declare_uobj_class(UOBJ_CLASS_JETTY_GRP, + &uobj_type_alloc_idr(sizeof(struct uburma_jetty_grp_uobj), 1, + uburma_free_jetty_grp)); +declare_uobj_class(UOBJ_CLASS_TARGET_JFR, + &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 0, + uburma_free_tjfr)); +declare_uobj_class(UOBJ_CLASS_TARGET_JETTY, + &uobj_type_alloc_idr(sizeof(struct uburma_tjetty_uobj), 0, + uburma_free_tjetty)); +declare_uobj_class(UOBJ_CLASS_TARGET_SEG, + &uobj_type_alloc_idr(sizeof(struct uburma_uobj), 0, + uburma_free_tseg)); diff --git a/drivers/ub/urma/uburma/uburma_uobj.h b/drivers/ub/urma/uburma/uburma_uobj.h new file mode 100644 index 000000000000..4ea3b028a04a --- /dev/null +++ b/drivers/ub/urma/uburma/uburma_uobj.h @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: uobj framework in uburma + * Author: Chen yujie + * Create: 2022-8-11 + * Note: + * History: 2022-8-11: Create file + */ + +#ifndef UBURMA_UOBJ_H +#define UBURMA_UOBJ_H + +#include + +enum UOBJ_CLASS_ID { + UOBJ_CLASS_ROOT, /* used by framework */ + UOBJ_CLASS_TOKEN, + UOBJ_CLASS_SEG, + UOBJ_CLASS_TARGET_SEG, + UOBJ_CLASS_JFR, + UOBJ_CLASS_JFS, + UOBJ_CLASS_JFC, + UOBJ_CLASS_JFCE, + UOBJ_CLASS_JFAE, + UOBJ_CLASS_TARGET_JFR, + UOBJ_CLASS_JETTY, + UOBJ_CLASS_TARGET_JETTY, + UOBJ_CLASS_JETTY_GRP, + UOBJ_CLASS_NOTIFIER, +}; + +enum uobj_access { + UOBJ_ACCESS_NOLOCK, + UOBJ_ACCESS_READ, /* LOCK READ */ + UOBJ_ACCESS_WRITE /* LOCK WRITE */ +}; + +struct uburma_uobj { + struct uburma_file *ufile; /* associated uburma file */ + void *object; /* containing object */ + struct list_head list; /* link to context's list */ + int id; /* index into kernel idr */ + struct kref ref; /* ref of object associated with uobj */ + atomic_t rcnt; /* protects exclusive access */ + struct rcu_head rcu; /* kfree_rcu() overhead */ + + const struct uobj_type *type; + struct ubcore_cg_object cg_obj; /* cgroup control */ + int class_id; +}; + +struct uobj_type { + const struct uobj_type_class *const type_class; + size_t obj_size; + unsigned int destroy_order; +}; + +struct uobj_type_class { + struct uburma_uobj *(*alloc_begin)(const struct uobj_type *type, + struct uburma_file *ufile); + void (*alloc_commit)(struct uburma_uobj *uobj); + void (*alloc_abort)(struct uburma_uobj *uobj); + struct uburma_uobj *(*lookup_get)(const struct uobj_type *type, + struct uburma_file *ufile, int id, + enum uobj_access flag); + void (*lookup_put)(struct uburma_uobj *uobj, enum uobj_access flag); + int __must_check (*remove_commit)(struct uburma_uobj *uobj, + enum uburma_remove_reason why); + int __must_check (*remove_commit_ex)(struct uburma_uobj **uobj, + int arr_num, int *bad_index, + enum uburma_remove_reason why); +}; + +struct uobj_idr_type { + struct uobj_type type; + int __must_check (*destroy_func)(struct uburma_uobj *uobj, + enum uburma_remove_reason why); +}; + +struct uobj_idr_ex_type { + struct uobj_type type; + int __must_check (*destroy_func)(struct uburma_uobj *uobj, + enum uburma_remove_reason why); + int __must_check (*destroy_batch_func)(struct uburma_uobj **uobj, + int arr_num, int *bad_index, + enum uburma_remove_reason why); +}; + +struct uobj_fd_type { + struct uobj_type type; + const char *name; + const struct file_operations *fops; + int flags; + int (*context_closed)(struct uburma_uobj *uobj, + enum uburma_remove_reason why); +}; + +struct uobj_class_def { + uint16_t id; + const struct uobj_type *type_attrs; +}; + +struct uburma_jfe { + spinlock_t lock; + struct list_head event_list; + wait_queue_head_t poll_wait; + bool deleting; + struct fasync_struct *async_queue; +}; + +struct uburma_jfce_uobj { + struct uburma_uobj uobj; + struct uburma_jfe jfe; +}; + +struct uburma_jfc_uobj { + struct uburma_uobj uobj; + struct uburma_uobj *jfce; /* associated jfce uobj */ + struct list_head comp_event_list; + struct list_head async_event_list; + uint32_t comp_events_reported; + uint32_t async_events_reported; +}; + +struct uburma_jfc_uobj_array { + struct uburma_jfc_uobj *uobj_arr; + int size; +}; + +struct uburma_jfs_uobj { + struct uburma_uobj uobj; + struct list_head async_event_list; + uint32_t async_events_reported; +}; + +struct uburma_jfr_uobj { + struct uburma_uobj uobj; + struct list_head async_event_list; + uint32_t async_events_reported; +}; + +struct uburma_jetty_uobj { + struct 
uburma_uobj uobj; + struct list_head async_event_list; + uint32_t async_events_reported; +}; + +struct uburma_jetty_grp_uobj { + struct uburma_uobj uobj; + struct list_head async_event_list; + uint32_t async_events_reported; +}; + +struct uburma_tjetty_uobj { + struct uburma_uobj uobj; + struct uburma_jetty_uobj *jetty_uobj; + bool should_unimport_async; +}; + +struct uburma_jfae_uobj { + struct uburma_uobj uobj; + struct uburma_jfe jfe; + struct ubcore_event_handler event_handler; + struct ubcore_device *dev; +}; + +struct uburma_notifier_uobj { + struct uburma_uobj uobj; + struct uburma_jfe jfe; + atomic_t incomplete_cnt; +}; + +extern const struct uobj_type_class uobj_idr_type_class; +extern const struct uobj_type_class uobj_fd_type_class; + +/* uobj base ops */ +struct uburma_uobj *uobj_alloc_begin(const struct uobj_type *type, + struct uburma_file *ufile, int class_id); +void uobj_alloc_commit(struct uburma_uobj *uobj); +void uobj_alloc_abort(struct uburma_uobj *uobj); +struct uburma_uobj *uobj_lookup_get(const struct uobj_type *type, + struct uburma_file *ufile, int id, + enum uobj_access flag); +void uobj_lookup_put(struct uburma_uobj *uobj, enum uobj_access flag); +int __must_check uobj_remove_commit(struct uburma_uobj *uobj); +int __must_check uobj_remove_commit_batch(struct uburma_uobj **uobj_arr, + int arr_num, int *bad_index); +void uobj_get(struct uburma_uobj *uobj); +void uobj_put(struct uburma_uobj *uobj); +void uobj_put_batch(struct uburma_uobj **uobj_arr, int arr_num); + +/* internal api */ +void uburma_init_uobj_context(struct uburma_file *ufile); +void uburma_cleanup_uobjs(struct uburma_file *ufile, + enum uburma_remove_reason why); + +void uburma_uobj_batch_del_enable(struct ubcore_ops *ops, + bool *enable_batch_class, bool *is_batch); + +void uburma_close_uobj_fd(struct file *f); + +#define uobj_class_name(class_id) uobj_class_##class_id + +#define uobj_get_type(class_id) uobj_class_name(class_id).type_attrs + +#define _uobj_class_set(_id, _type_attrs) \ + ((const struct uobj_class_def){ .id = (_id), \ + .type_attrs = (_type_attrs) }) + +#define _declare_uobj_class(_name, _id, _type_attrs) \ + const struct uobj_class_def _name = _uobj_class_set(_id, _type_attrs) + +#define declare_uobj_class(class_id, ...) 
\ + _declare_uobj_class(uobj_class_name(class_id), class_id, ##__VA_ARGS__) + +#define uobj_type_alloc_idr(_size, _order, _destroy_func) \ + ((&((const struct uobj_idr_type) { \ + .type = { \ + .type_class = &uobj_idr_type_class, \ + .obj_size = (_size), \ + .destroy_order = (_order), \ + }, \ + .destroy_func = (_destroy_func), \ + }))->type) + +#define uobj_type_alloc_idr_ex(_size, _order, _destroy_func, \ + _destroy_batch_func) \ + ((&((const struct uobj_idr_ex_type) { \ + .type = { \ + .type_class = &uobj_idr_type_class, \ + .obj_size = (_size), \ + .destroy_order = (_order), \ + }, \ + .destroy_func = (_destroy_func), \ + .destroy_batch_func = (_destroy_batch_func), \ + }))->type) + +#define uobj_type_alloc_fd(_order, _obj_size, _context_closed, _fops, _name, \ + _flags) \ + ((&((const struct uobj_fd_type) { \ + .type = { \ + .destroy_order = (_order), \ + .type_class = &uobj_fd_type_class, \ + .obj_size = (_obj_size), \ + }, \ + .context_closed = (_context_closed), \ + .fops = (_fops), \ + .name = (_name), \ + .flags = (_flags) \ + }))->type) + +static inline bool uobj_type_is_fd(const struct uburma_uobj *uobj) +{ + return uobj->type->type_class == &uobj_fd_type_class; +} + +#define uobj_alloc(class_id, ufile) \ + uobj_alloc_begin(uobj_get_type(class_id), ufile, class_id) + +#define uobj_get_read(class_id, _id, ufile) \ + uobj_lookup_get(uobj_get_type(class_id), ufile, _id, UOBJ_ACCESS_READ) + +#define uobj_put_read(uobj) uobj_lookup_put(uobj, UOBJ_ACCESS_READ) + +#define uobj_get_write(class_id, _id, ufile) \ + uobj_lookup_get(uobj_get_type(class_id), ufile, _id, UOBJ_ACCESS_WRITE) + +#define uobj_put_write(uobj) uobj_lookup_put(uobj, UOBJ_ACCESS_WRITE) + +/* Do not lock uobj without cleanup_rwsem locked */ +#define uobj_get_del(class_id, _id, ufile) \ + uobj_lookup_get(uobj_get_type(class_id), ufile, _id, UOBJ_ACCESS_NOLOCK) + +#define uobj_put_del(uobj) uobj_put(uobj) +#define uobj_put_del_batch(uobj_arr, arr_num) uobj_put_batch(uobj_arr, arr_num) + +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_TOKEN; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_SEG; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFCE; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFAE; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFC; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFR; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JFS; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JETTY; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_JETTY_GRP; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_TARGET_JFR; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_TARGET_SEG; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_TARGET_JETTY; +extern const struct uobj_class_def uobj_class_UOBJ_CLASS_NOTIFIER; + +extern const struct file_operations uburma_jfce_fops; +extern const struct file_operations uburma_jfae_fops; +extern const struct file_operations uburma_notifier_fops; + +#endif /* UBURMA_UOBJ_H */ diff --git a/include/ub/urma/ubcore_api.h b/include/ub/urma/ubcore_api.h new file mode 100644 index 000000000000..1ed420be792e --- /dev/null +++ b/include/ub/urma/ubcore_api.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: API definition provided by ubcore to ubep device driver + * Author: Qian Guoxin + * Create: 2022-1-25 + * Note: + * History: 2022-1-25: Create file + */ + +#ifndef UBCORE_API_H +#define UBCORE_API_H + +#include "ubcore_types.h" + +/** + * Register a device to ubcore + * @param[in] dev: the ubcore device; + * @return: 0 on success, other value on error + */ +int ubcore_register_device(struct ubcore_device *dev); +/** + * Unregister a device from ubcore + * @param[in] dev: the ubcore device; + */ +void ubcore_unregister_device(struct ubcore_device *dev); +/** + * Stop a device from ubcore + * @param[in] dev: the ubcore device; + */ +void ubcore_stop_requests(struct ubcore_device *dev); +/** + * Dispatch an asynchronous event to all registered handlers + * @param[in] event: asynchronous event; + */ +void ubcore_dispatch_async_event(struct ubcore_event *event); + +/** + * Allocate physical memory and do DMA mapping + * @param[in] dev: the ubcore device; + * @param[in] va: the VA address to be mapped. + * @param[in] len: Length of the address space to be allocated and mapped by DMA. + * @param[in] flag: Attribute flags + * Return: umem ptr on success, ERR_PTR on error + */ +struct ubcore_umem *ubcore_umem_get(struct ubcore_device *dev, uint64_t va, + uint64_t len, union ubcore_umem_flag flag); + +/** + * Find best HW page size to use for this segment + * @param[in] umem: umem struct, return of ubcore_umem_get; + * @param[in] page_size_bitmap: bitmap of HW supported page sizes, must include PAGE_SIZE; + * @param[in] va: Initial address of this segment. + * Return: before kernel 5.3: return 4K; + * kernel 5.3 and later: Returns 0 if the umem requires page sizes not supported by the + * driver to be mapped; returns non-zero on the best page size. + */ +uint64_t ubcore_umem_find_best_page_size(struct ubcore_umem *umem, + uint64_t page_size_bitmap, + uint64_t va); + +/** + * Release umem allocated + * @param[in] umem: the ubcore umem created before + */ +void ubcore_umem_release(struct ubcore_umem *umem); + +/** + * Invoke get mtu value, called only by driver + * @param[in] mtu: specifies the MTU value of the NIC interface. + * @return: The MTU of the UB protocol, this value removes the length of the network layer, + * transport layer, transaction layer header and ICRC. + */ +enum ubcore_mtu ubcore_get_mtu(int mtu); + +/** + * MUE receives msg from UE or MUE, called only by driver. + * @param[in] dev: MUE device; + * @param[in] req: received msg; + * @return: 0 on success, other value on error + */ +int ubcore_recv_req(struct ubcore_device *dev, struct ubcore_req_host *req); + +/** + * UE or MUE receives msg from MUE, called only by driver. 
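+ * This is the response counterpart of ubcore_recv_req(): the MUE answers
+ * a request it received earlier, and the UE or MUE driver hands that
+ * answer back to ubcore here.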
+ * @param[in] dev: UE or MUE device; + * @param[in] resp: received msg; + * @return: 0 on success, other value on error + */ +int ubcore_recv_resp(struct ubcore_device *dev, struct ubcore_resp *resp); + +/** + * Invoke ndev bind port_id, called only by driver + * @param[in] dev: the ubcore device; + * @param[in] ndev: The netdev corresponding to the initial port + * @param[in] port_id: The physical port_id is the same as the port_id presented in the sysfs file, + * and port_id is configured in TP during link establishment. + * @return: 0 on success, other value on error + */ +int ubcore_set_port_netdev(struct ubcore_device *dev, struct net_device *ndev, + unsigned int port_id); + +/** + * Invoke ndev unbind port_id, called only by driver + * @param[in] dev: the ubcore device; + * @param[in] ndev: The netdev corresponding to the initial port + * @param[in] port_id: The physical port_id is the same as the port_id presented in the sysfs file, + * and port_id is configured in TP during link establishment. + * @return: 0 on success, other value on error + */ +int ubcore_unset_port_netdev(struct ubcore_device *dev, struct net_device *ndev, + unsigned int port_id); + +/** + * Invoke ndev unbind port_id, called only by driver + * @param[in] dev: the ubcore device; + * @return: void + */ +void ubcore_put_port_netdev(struct ubcore_device *dev); + +/** + * Invoke The management system calls ubcore interface through uvs_admin to set the device name + * and add sip information used for link establishment. + * @param[in] sip: Specify the sip information used to establish the link, including device name, + * sip, mac, vlan, physical port list. + * @return: 0 on success, other value on error + */ +int ubcore_add_sip(struct ubcore_sip_info *sip, uint32_t *sip_idx); + +/** + * Invoke The management system calls ubcore interface through UVS to delete the sip information. + * @param[in] sip: Specify the sip information used to establish the link, including device name, + * sip, mac, vlan, physical port list. + * @return: 0 on success, other value on error + */ +int ubcore_delete_sip(struct ubcore_sip_info *sip); + +/** + * Invoke get eid list + * @param[in] dev: the ubcore device; + * @param[out] cnt: eid cnt + * @return: eid info on success, NULL on error + */ +struct ubcore_eid_info *ubcore_get_eid_list(struct ubcore_device *dev, + uint32_t *cnt); + +/** + * Release umem allocated + * @param[in] eid_list: the eid list to be freed + */ +void ubcore_free_eid_list(struct ubcore_eid_info *eid_list); + +/** + * Ubcore dispatches provider management event + * @param[in] event: event message, including event type, eid_info, etc. + */ +void ubcore_dispatch_mgmt_event(struct ubcore_mgmt_event *event); + +#endif diff --git a/include/ub/urma/ubcore_jetty.h b/include/ub/urma/ubcore_jetty.h new file mode 100644 index 000000000000..86dfaf862270 --- /dev/null +++ b/include/ub/urma/ubcore_jetty.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: ubcore jetty find header file + * Author: Zhang jiayu + * Create: 2024-3-12 + * Note: + * History: 2024-3-12: Create file + */ + +#ifndef UBCORE_JETTY_H +#define UBCORE_JETTY_H + +#include "ubcore_types.h" + +/* The APIs below are deprecated, should not be called by driver or ubcore client */ +struct ubcore_jfc *ubcore_find_jfc(struct ubcore_device *dev, uint32_t jfc_id); + +// Deprecated, use ubcore_find_get_jfs +struct ubcore_jfs *ubcore_find_jfs(struct ubcore_device *dev, uint32_t jfs_id); +struct ubcore_jfs *ubcore_find_get_jfs(struct ubcore_device *dev, + uint32_t jfs_id); +void ubcore_put_jfs(struct ubcore_jfs *jfs); +void ubcore_jfs_get(void *obj); + +// Deprecated, use ubcore_find_get_jfr +struct ubcore_jfr *ubcore_find_jfr(struct ubcore_device *dev, uint32_t jfr_id); +struct ubcore_jfr *ubcore_find_get_jfr(struct ubcore_device *dev, + uint32_t jfr_id); +void ubcore_put_jfr(struct ubcore_jfr *jfr); +void ubcore_jfr_get(void *obj); + +// Deprecated, use ubcore_find_get_jetty +struct ubcore_jetty *ubcore_find_jetty(struct ubcore_device *dev, + uint32_t jetty_id); +struct ubcore_jetty *ubcore_find_get_jetty(struct ubcore_device *dev, + uint32_t jetty_id); +void ubcore_put_jetty(struct ubcore_jetty *jetty); +void ubcore_jetty_get(void *obj); + +#endif diff --git a/include/ub/urma/ubcore_opcode.h b/include/ub/urma/ubcore_opcode.h new file mode 100644 index 000000000000..a55a09caa347 --- /dev/null +++ b/include/ub/urma/ubcore_opcode.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore opcode header file + * Author: Qian Guoxin, Ouyang Changchun + * Create: 2023-2-28 + * Note: + * History: 2023-2-28: Create file + */ + +#ifndef UBCORE_OPCODE_H +#define UBCORE_OPCODE_H + +/* Indicates the verification policy of the key. */ +#define UBCORE_TOKEN_NONE 0 +#define UBCORE_TOKEN_PLAIN_TEXT 1 +#define UBCORE_TOKEN_SIGNED 2 +#define UBCORE_TOKEN_ALL_ENCRYPTED 3 +#define UBCORE_TOKEN_RESERVED 4 + +#define UBCORE_TOKEN_ID_INVALID 0 +#define UBCORE_TOKEN_ID_VALID 1 + +/* Indicates whether the segment can be cached by multiple hosts. */ +#define UBCORE_NON_CACHEABLE 0 +#define UBCORE_CACHEABLE 1 + +/* Indicates that the current process has mapped this segment */ +#define UBCORE_SEG_NOMAP 0 +#define UBCORE_SEG_MAPPED 1 + +/* Notify the source after the task is completed. */ +#define UBCORE_COMPLETE_ENABLE 1 +/* Do not notify the source after the task is complete. */ +#define UBCORE_COMPLETE_DISABLE 0 + +/* There is no interruption when notifying through JFC. */ +#define UBCORE_SOLICITED_DISABLE 0 +/* Interrupt occurred while notifying via JFC. */ +#define UBCORE_SOLICITED_ENABLE 1 + +/* There is no fence. */ +#define UBCORE_FENCE_DISABLE 0 +/* Fence with previous WRs. */ +#define UBCORE_FENCE_ENABLE 1 + +/* The data is generated by source_address assignment. */ +#define UBCORE_INLINE_DISABLE 0 +/* The data is carried in the command. 
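In other words, the payload is copied into the work request itself rather than referenced through a source address.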
*/ +#define UBCORE_INLINE_ENABLE 1 + +#define UBCORE_NO_SHARE_JFR 0 +#define UBCORE_SHARE_JFR 1 + +/* opcode definition */ +/* Must be consistent with urma_opcode_t */ +enum ubcore_opcode { + UBCORE_OPC_WRITE = 0x00, + UBCORE_OPC_WRITE_IMM = 0x01, + UBCORE_OPC_WRITE_NOTIFY = 0x02, + UBCORE_OPC_READ = 0x10, + UBCORE_OPC_CAS = 0x20, + UBCORE_OPC_SWAP = 0x21, + UBCORE_OPC_FADD = 0x22, + UBCORE_OPC_FSUB = 0x23, + UBCORE_OPC_FAND = 0x24, + UBCORE_OPC_FOR = 0x25, + UBCORE_OPC_FXOR = 0x26, + UBCORE_OPC_SEND = 0x40, // remote JFR/jetty ID + UBCORE_OPC_SEND_IMM = 0x41, // remote JFR/jetty ID + UBCORE_OPC_SEND_INVALIDATE = + 0x42, // remote JFR/jetty ID and seg token id + UBCORE_OPC_NOP = 0x51, + UBCORE_OPC_LAST +}; + +/* completion information */ +/* Must be consistent with urma_cr_status_t */ +enum ubcore_cr_status { // completion record status + UBCORE_CR_SUCCESS = 0, + UBCORE_CR_UNSUPPORTED_OPCODE_ERR, + UBCORE_CR_LOC_LEN_ERR, // Local data too long error + UBCORE_CR_LOC_OPERATION_ERR, // Local operation err + UBCORE_CR_LOC_ACCESS_ERR, // Access to local memory error when WRITE_WITH_IMM + UBCORE_CR_REM_RESP_LEN_ERR, + UBCORE_CR_REM_UNSUPPORTED_REQ_ERR, + UBCORE_CR_REM_OPERATION_ERR, + /* Memory access protection error occurred in the remote node */ + UBCORE_CR_REM_ACCESS_ABORT_ERR, + UBCORE_CR_ACK_TIMEOUT_ERR, + /* RNR retries exceeded the maximum number: remote jfr has no buffer */ + UBCORE_CR_RNR_RETRY_CNT_EXC_ERR, + UBCORE_CR_FLUSH_ERR, + UBCORE_CR_WR_SUSPEND_DONE, + UBCORE_CR_WR_FLUSH_ERR_DONE, + UBCORE_CR_WR_UNHANDLED, + UBCORE_CR_LOC_DATA_POISON, + UBCORE_CR_REM_DATA_POISON +}; + +/* Must be consistent with urma_cr_opcode_t */ +enum ubcore_cr_opcode { + UBCORE_CR_OPC_SEND = 0x00, + UBCORE_CR_OPC_SEND_WITH_IMM, + UBCORE_CR_OPC_SEND_WITH_INV, + UBCORE_CR_OPC_WRITE_WITH_IMM +}; + +enum ubcore_slice { + UBCORE_SLICE_32K = 1 << 15, + UBCORE_SLICE_64K = 1 << 16, + UBCORE_SLICE_128K = 1 << 17, + UBCORE_SLICE_256K = 1 << 18 +}; + +#endif diff --git a/include/ub/urma/ubcore_types.h b/include/ub/urma/ubcore_types.h new file mode 100644 index 000000000000..3749f0575e4b --- /dev/null +++ b/include/ub/urma/ubcore_types.h @@ -0,0 +1,3074 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: Types definition provided by ubcore to client and ubep device + * Author: Qian Guoxin, Ouyang Changchun + * Create: 2021-8-3 + * Note: + * History: 2021-8-3: Create file + * History: 2021-11-23: Add segment and jetty management + */ + +#ifndef UBCORE_TYPES_H +#define UBCORE_TYPES_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_CGROUP_RDMA +#include +#endif + +#include "ubcore_opcode.h" + +#define UBCORE_GET_VERSION(a, b) (((a) << 16) + ((b) > 65535 ? 
65535 : (b))) +#define UBCORE_API_VERSION ((0 << 16) + 9) // Current Version: 0.9 + +#define UBCORE_MAX_JETTY_IN_JETTY_GRP 32U +#define UBCORE_MAX_PORT_CNT 16 +#define UBCORE_MAX_UE_CNT 1024 +#define UBCORE_MAX_DEV_NAME 64 +#define UBCORE_MAX_DRIVER_NAME 64 +#define UBCORE_HASH_TABLE_SIZE 10240 +#define UBCORE_NET_ADDR_BYTES (16) +#define UBCORE_IP_ADDR_BYTES 16 +#define UBCORE_MAC_BYTES 6 +#define UBCORE_MAX_ATTR_GROUP 3 +#define UBCORE_EID_SIZE (16) +#define UBCORE_EID_STR_LEN (39) +#define UBCORE_DEVID_SIZE (16) +#define UBCORE_GUID_SIZE (16) +#define UBCORE_MAX_MSG 4096 +#define UBCORE_MAX_EID_CNT 1024 +#define UBCORE_MAX_VTP_CNT_PER_MUE \ + (128 * 1024) // Temporarily specify the upper limit +#define UBCORE_MAX_TPG_CNT_PER_MUE \ + (16 * 1024) // Temporarily specify the upper limit +#define UBCORE_EID_GROUP_NAME_LEN 10 +#define UBCORE_MAX_MIG_ENTRY_CNT 64 +#define UBCORE_RESERVED_JETTY_ID_MIN 0 +#define UBCORE_RESERVED_JETTY_ID_MAX 1023 + +#define EID_FMT \ + "%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x:%2.2x%2.2x" +#define EID_UNPACK(...) __VA_ARGS__ +#define EID_RAW_ARGS(eid) \ + EID_UNPACK(eid[0], eid[1], eid[2], eid[3], eid[4], eid[5], eid[6], \ + eid[7], eid[8], eid[9], eid[10], eid[11], eid[12], eid[13], \ + eid[14], eid[15]) +#define EID_ARGS(eid) EID_RAW_ARGS((eid).raw) + +#define UBCORE_OWN_UE_IDX (0xffff) +#define UBCORE_JETTY_GRP_MAX_NAME 64 +#define UBCORE_MAX_TP_CNT_IN_GRP 32 +/* support 8 priorities and 8 algorithms */ +/* same as URMA_CC_IDX_TABLE_SIZE */ +#define UBCORE_CC_IDX_TABLE_SIZE 81 +#define UBCORE_SIP_TABLE_SIZE (1024) +#define UBCORE_MAX_SIP UBCORE_SIP_TABLE_SIZE +#define UBCORE_CHECK_RETURN_ERR_PTR(ptr, err) \ + ((ptr) == NULL ? ERR_PTR(-(err)) : (ptr)) + +#define UBCORE_MAX_DSCP_NUM (64) + +enum ubcore_transport_type { + UBCORE_TRANSPORT_INVALID = -1, + UBCORE_TRANSPORT_UB = 0, + UBCORE_TRANSPORT_MAX +}; + +enum ubcore_resource_type { + UBCORE_RESOURCE_HCA_HANDLE = 0, + UBCORE_RESOURCE_HCA_OBJECT, + UBCORE_RESOURCE_HCA_MAX +}; + +enum ubcore_ldev_attr_group { + UBCORE_ATTR_GROUP_DEV_DEFAULT = 0, + UBCORE_ATTR_GROUP_EIDS = 1, + UBCORE_ATTR_GROUP_NULL = 2, + UBCORE_ATTR_GROUP_MAX = 3 +}; + +/* If UBCORE_ACCESS_LOCAL_ONLY is set, local access will have all the permissions of + * READ, WRITE, and ATOMIC but external access is denied. + * If UBCORE_ACCESS_LOCAL_ONLY is not set, in addition to having all permissions for local access, + * the configuration of external access permissions is determined by the following three types, and + * it takes effect according to the combination of READ, WRITE, and ATOMIC configured by the user. 
+ */ +#define UBCORE_ACCESS_LOCAL_ONLY 0x1 +#define UBCORE_ACCESS_READ (0x1 << 1) +#define UBCORE_ACCESS_WRITE (0x1 << 2) +#define UBCORE_ACCESS_ATOMIC (0x1 << 3) + +#define UBCORE_SEG_TOKEN_ID_INVALID UINT_MAX + +struct ubcore_cg_device { +#ifdef CONFIG_CGROUP_RDMA + struct rdmacg_device dev; +#endif +}; + +struct ubcore_cg_object { +#ifdef CONFIG_CGROUP_RDMA + struct rdma_cgroup *cg; +#endif +}; + +union ubcore_eid { + uint8_t raw[UBCORE_EID_SIZE]; + struct { + uint64_t reserved; + uint32_t prefix; + uint32_t addr; + } in4; + struct { + uint64_t subnet_prefix; + uint64_t interface_id; + } in6; +}; + +struct ubcore_eid_info { + union ubcore_eid eid; + uint32_t eid_index; /* 0~MAX_EID_CNT -1 */ +}; + +struct ubcore_ueid_cfg { + union ubcore_eid eid; + uint32_t upi; + uint32_t eid_index; + guid_t guid; +}; + +struct ubcore_devid { + uint8_t raw[UBCORE_DEVID_SIZE]; +}; + +struct ubcore_jetty_id { + union ubcore_eid eid; + uint32_t id; +}; + +struct ubcore_ubva { + union ubcore_eid eid; + uint64_t va; +} __packed; + +struct ubcore_ht_param { + uint32_t size; + uint32_t node_offset; /* offset of hlist node in the hash table object */ + uint32_t key_offset; + uint32_t key_size; + int (*cmp_f)(void *obj, const void *key); + void (*free_f)(void *obj); + void (*get_f)(void *obj); +}; + +struct ubcore_hash_table { + struct ubcore_ht_param p; + struct hlist_head *head; + /* Prevent the same jetty + * from being bound by different tjetty + */ + struct ubcore_jetty_id rc_tjetty_id; + spinlock_t lock; + struct kref kref; +}; + +union ubcore_jfc_flag { + struct { + uint32_t lock_free : 1; + uint32_t jfc_inline : 1; + uint32_t reserved : 30; + } bs; + uint32_t value; +}; + +#define UBCORE_SUB_TRANS_MODE_TA_DST_ORDERING_ENABLE (0x1) +#define UBCORE_SUB_TRANS_MODE_USER_TP (0x2) + +enum ubcore_order_type { + UBCORE_DEF_ORDER, + UBCORE_OT, // target ordering + UBCORE_OI, // initiator ordering + UBCORE_OL, // low layer ordering + UBCORE_NO // unreliable non ordering +}; + +union ubcore_jfs_flag { + struct { + uint32_t lock_free : 1; + uint32_t error_suspend : 1; + uint32_t outorder_comp : 1; + uint32_t order_type : 8; /* (0x0): default, auto config by driver */ + /* (0x1): OT, target ordering */ + /* (0x2): OI, initiator ordering */ + /* (0x3): OL, low layer ordering */ + /* (0x4): UNO, unreliable non ordering */ + uint32_t multi_path : 1; + uint32_t reserved : 20; + } bs; + uint32_t value; +}; + +union ubcore_jfr_flag { + struct { + /* 0: UBCORE_TOKEN_NONE + * 1: UBCORE_TOKEN_PLAIN_TEXT + * 2: UBCORE_TOKEN_SIGNED + * 3: UBCORE_TOKEN_ALL_ENCRYPTED + * 4: UBCORE_TOKEN_RESERVED + */ + uint32_t token_policy : 3; + uint32_t tag_matching : 1; + uint32_t lock_free : 1; + uint32_t order_type : 8; /* (0x0): default, auto config by driver */ + /* (0x1): OT, target ordering */ + /* (0x2): OI, initiator ordering */ + /* (0x3): OL, low layer ordering */ + /* (0x4): UNO, unreliable non ordering */ + uint32_t reserved : 19; + } bs; + uint32_t value; +}; + +enum ubcore_jfc_attr_mask { + UBCORE_JFC_MODERATE_COUNT = 0x1, + UBCORE_JFC_MODERATE_PERIOD = 0x1 << 1 +}; + +struct ubcore_jfc_attr { + uint32_t mask; /* mask value refer to enum ubcore_jfc_attr_mask */ + uint16_t moderate_count; + uint16_t moderate_period; /* in micro seconds */ +}; + +enum ubcore_jfc_state { + UBCORE_JFC_STATE_INVALID = 0, + UBCORE_JFC_STATE_VALID, + UBCORE_JFC_STATE_ERROR +}; + +enum ubcore_jetty_state { + UBCORE_JETTY_STATE_RESET = 0, + UBCORE_JETTY_STATE_READY, + UBCORE_JETTY_STATE_SUSPENDED, + UBCORE_JETTY_STATE_ERROR +}; + +enum ubcore_jfr_state 
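/*
 * Minimal usage sketch (editorial, not part of the patch): logging an EID with
 * the EID_FMT/EID_ARGS helpers defined above; the message text is arbitrary.
 */
static inline void example_log_eid(const union ubcore_eid *eid)
{
	pr_info("example eid " EID_FMT "\n", EID_ARGS(*eid));
}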
{ + UBCORE_JFR_STATE_RESET = 0, + UBCORE_JFR_STATE_READY, + UBCORE_JFR_STATE_ERROR +}; + +enum ubcore_jfs_attr_mask { UBCORE_JFS_STATE = 0x1 }; + +struct ubcore_jfs_attr { + uint32_t mask; /* mask value refer to ubcore_jfs_attr_mask_t */ + enum ubcore_jetty_state state; +}; + +enum ubcore_jfr_attr_mask { + UBCORE_JFR_RX_THRESHOLD = 0x1, + UBCORE_JFR_STATE = 0x1 << 1 +}; + +struct ubcore_jfr_attr { + uint32_t mask; /* mask value refer to enum ubcore_jfr_attr_mask */ + uint32_t rx_threshold; + enum ubcore_jfr_state state; +}; + +enum ubcore_jetty_attr_mask { + UBCORE_JETTY_RX_THRESHOLD = 0x1, + UBCORE_JETTY_STATE = 0x1 << 1 +}; + +struct ubcore_jetty_attr { + uint32_t mask; /* mask value refer to enum ubcore_jetty_attr_mask */ + uint32_t rx_threshold; + enum ubcore_jetty_state state; +}; + +union ubcore_import_seg_flag { + struct { + uint32_t cacheable : 1; + uint32_t access : 6; + uint32_t mapping : 1; + uint32_t reserved : 24; + } bs; + uint32_t value; +}; + +union ubcore_reg_seg_flag { + struct { + uint32_t token_policy : 3; + uint32_t cacheable : 1; + uint32_t dsva : 1; + uint32_t access : 6; + uint32_t non_pin : 1; + uint32_t user_iova : 1; + uint32_t token_id_valid : 1; + uint32_t pa : 1; + uint32_t reserved : 17; + } bs; + uint32_t value; +}; + +struct ubcore_udrv_priv { + uint64_t in_addr; + uint32_t in_len; + uint64_t out_addr; + uint32_t out_len; +}; + +struct ubcore_ucontext { + struct ubcore_device *ub_dev; + union ubcore_eid eid; + uint32_t eid_index; + void *jfae; /* jfae uobj */ + struct ubcore_cg_object cg_obj; + atomic_t use_cnt; +}; + +struct ubcore_udata { + struct ubcore_ucontext *uctx; + struct ubcore_udrv_priv *udrv_data; +}; + +struct ubcore_token { + uint32_t token; +}; + +enum ubcore_event_type { + UBCORE_EVENT_JFC_ERR, + UBCORE_EVENT_JFS_ERR, + UBCORE_EVENT_JFR_ERR, + UBCORE_EVENT_JFR_LIMIT_REACHED, + UBCORE_EVENT_JETTY_ERR, + UBCORE_EVENT_JETTY_LIMIT_REACHED, + UBCORE_EVENT_JETTY_GRP_ERR, + UBCORE_EVENT_PORT_ACTIVE, + UBCORE_EVENT_PORT_DOWN, + UBCORE_EVENT_DEV_FATAL, + UBCORE_EVENT_EID_CHANGE, + UBCORE_EVENT_TP_ERR, + UBCORE_EVENT_TP_SUSPEND, + UBCORE_EVENT_TP_FLUSH_DONE, + UBCORE_EVENT_ELR_ERR, + UBCORE_EVENT_ELR_DONE, + UBCORE_EVENT_MIGRATE_VTP_SWITCH, + UBCORE_EVENT_MIGRATE_VTP_ROLLBACK +}; + +/* transport mode */ +enum ubcore_transport_mode { + UBCORE_TP_RM = 0x1, /* Reliable message */ + UBCORE_TP_RC = 0x1 << 1, /* Reliable connection */ + UBCORE_TP_UM = 0x1 << 2 /* Unreliable message */ +}; + +enum ubcore_jetty_grp_policy { + UBCORE_JETTY_GRP_POLICY_RR = 0, + UBCORE_JETTY_GRP_POLICY_HASH_HINT = 1 +}; + +enum ubcore_target_type { UBCORE_JFR = 0, UBCORE_JETTY, UBCORE_JETTY_GROUP }; + +union ubcore_token_id_flag { + struct { + uint32_t pa : 1; + uint32_t multi_seg : 1; + uint32_t reserved : 30; + } bs; + uint32_t value; +}; + +struct ubcore_token_id { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + uint32_t token_id; // driver fill + union ubcore_token_id_flag flag; + atomic_t use_cnt; +}; + +struct ubcore_seg_cfg { + uint64_t va; + uint64_t len; + uint32_t eid_index; + struct ubcore_token_id *token_id; + struct ubcore_token token_value; + union ubcore_reg_seg_flag flag; + uint64_t user_ctx; + uint64_t iova; +}; + +union ubcore_seg_attr { + struct { + uint32_t token_policy : 3; + uint32_t cacheable : 1; + uint32_t dsva : 1; + uint32_t access : 6; + uint32_t non_pin : 1; + uint32_t user_iova : 1; + uint32_t user_token_id : 1; + uint32_t pa : 1; + uint32_t reserved : 17; + } bs; + uint32_t value; +}; + +struct ubcore_seg { + struct ubcore_ubva 
ubva; + uint64_t len; + union ubcore_seg_attr attr; + uint32_t token_id; +}; + +struct ubcore_target_seg_cfg { + struct ubcore_seg seg; + union ubcore_import_seg_flag flag; + uint64_t mva; /* optional */ + struct ubcore_token token_value; +}; + +struct ubcore_target_seg { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_seg seg; + uint64_t mva; + struct ubcore_token_id *token_id; + atomic_t use_cnt; +}; + +enum ubcore_mtu { + UBCORE_MTU_256 = 1, + UBCORE_MTU_512, + UBCORE_MTU_1024, + UBCORE_MTU_2048, + UBCORE_MTU_4096, + UBCORE_MTU_8192 +}; + +enum ubcore_tp_cc_alg { + UBCORE_TP_CC_NONE = 0, + UBCORE_TP_CC_DCQCN, + UBCORE_TP_CC_DCQCN_AND_NETWORK_CC, + UBCORE_TP_CC_LDCP, + UBCORE_TP_CC_LDCP_AND_CAQM, + UBCORE_TP_CC_LDCP_AND_OPEN_CC, + UBCORE_TP_CC_HC3, + UBCORE_TP_CC_DIP, + UBCORE_TP_CC_ACC, + UBCORE_TP_CC_NUM +}; + +enum ubcore_congestion_ctrl_alg { + UBCORE_CC_NONE = 0x1 << UBCORE_TP_CC_NONE, + UBCORE_CC_DCQCN = 0x1 << UBCORE_TP_CC_DCQCN, + UBCORE_CC_DCQCN_AND_NETWORK_CC = 0x1 + << UBCORE_TP_CC_DCQCN_AND_NETWORK_CC, + UBCORE_CC_LDCP = 0x1 << UBCORE_TP_CC_LDCP, + UBCORE_CC_LDCP_AND_CAQM = 0x1 << UBCORE_TP_CC_LDCP_AND_CAQM, + UBCORE_CC_LDCP_AND_OPEN_CC = 0x1 << UBCORE_TP_CC_LDCP_AND_OPEN_CC, + UBCORE_CC_HC3 = 0x1 << UBCORE_TP_CC_HC3, + UBCORE_CC_DIP = 0x1 << UBCORE_TP_CC_DIP, + UBCORE_CC_ACC = 0x1 << UBCORE_TP_CC_ACC +}; + +enum ubcore_speed { + UBCORE_SP_10M = 0, + UBCORE_SP_100M, + UBCORE_SP_1G, + UBCORE_SP_2_5G, + UBCORE_SP_5G, + UBCORE_SP_10G, + UBCORE_SP_14G, + UBCORE_SP_25G, + UBCORE_SP_40G, + UBCORE_SP_50G, + UBCORE_SP_100G, + UBCORE_SP_200G, + UBCORE_SP_400G, + UBCORE_SP_800G +}; + +enum ubcore_link_width { + UBCORE_LINK_X1 = 0x1, + UBCORE_LINK_X2 = 0x1 << 1, + UBCORE_LINK_X4 = 0x1 << 2, + UBCORE_LINK_X8 = 0x1 << 3, + UBCORE_LINK_X16 = 0x1 << 4, + UBCORE_LINK_X32 = 0x1 << 5 +}; + +enum ubcore_port_state { + UBCORE_PORT_NOP = 0, + UBCORE_PORT_DOWN, + UBCORE_PORT_INIT, + UBCORE_PORT_ARMED, + UBCORE_PORT_ACTIVE, + UBCORE_PORT_ACTIVE_DEFER +}; + +union ubcore_device_feat { + struct { + uint32_t oor : 1; + uint32_t jfc_per_wr : 1; + uint32_t stride_op : 1; + uint32_t load_store_op : 1; + uint32_t non_pin : 1; + uint32_t pmem : 1; + uint32_t jfc_inline : 1; + uint32_t spray_en : 1; + uint32_t selective_retrans : 1; + uint32_t live_migrate : 1; + uint32_t dca : 1; + uint32_t jetty_grp : 1; + uint32_t err_suspend : 1; + uint32_t outorder_comp : 1; + uint32_t mn : 1; + uint32_t clan : 1; + uint32_t muti_seg_per_token_id : 1; + uint32_t reserved : 15; + } bs; + uint32_t value; +}; + +union ubcore_atomic_feat { + struct { + uint32_t cas : 1; + uint32_t swap : 1; + uint32_t fetch_and_add : 1; + uint32_t fetch_and_sub : 1; + uint32_t fetch_and_and : 1; + uint32_t fetch_and_or : 1; + uint32_t fetch_and_xor : 1; + uint32_t reserved : 25; + } bs; + uint32_t value; +}; + +struct ubcore_port_status { + enum ubcore_port_state state; /* PORT_DOWN, PORT_INIT, PORT_ACTIVE */ + enum ubcore_speed active_speed; /* bandwidth */ + enum ubcore_link_width active_width; /* link width: X1, X2, X4 */ + enum ubcore_mtu active_mtu; +}; + +struct ubcore_device_status { + struct ubcore_port_status port_status[UBCORE_MAX_PORT_CNT]; +}; + +struct ubcore_port_attr { + enum ubcore_mtu max_mtu; /* MTU_256, MTU_512, MTU_1024 */ +}; + +enum ubcore_pattern { UBCORE_PATTERN_1 = 0, UBCORE_PATTERN_3 }; + +enum ubcore_sub_trans_mode_cap { + UBCORE_RC_TP_DST_ORDERING = 0x1, /* rc mode with tp dst ordering */ + UBCORE_RC_TA_DST_ORDERING = 0x1 + << 1, /* rc mode with ta dst ordering  */ + 
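/*
 * Minimal registration sketch (editorial, not part of the patch): filling a
 * struct ubcore_seg_cfg for a pinned local buffer before handing it to the
 * driver's register_seg op. The token value, eid_index and access bits are
 * placeholder assumptions, not values mandated by this patch.
 */
static void example_fill_seg_cfg(struct ubcore_seg_cfg *cfg,
				 uint64_t va, uint64_t len)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->va = va;
	cfg->len = len;
	cfg->eid_index = 0;
	cfg->flag.bs.token_policy = UBCORE_TOKEN_PLAIN_TEXT;
	cfg->flag.bs.access = UBCORE_ACCESS_READ | UBCORE_ACCESS_WRITE;
	cfg->token_value.token = 0xabcd; /* hypothetical token */
}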
UBCORE_RC_USER_TP = 0x1 << 2, /* rc mode with user connection  */ +}; + +enum ubcore_mgmt_event_type { + UBCORE_MGMT_EVENT_EID_ADD, + UBCORE_MGMT_EVENT_EID_RMV, +}; + +union ubcore_order_type_cap { + struct { + uint32_t ot : 1; + uint32_t oi : 1; + uint32_t ol : 1; + uint32_t no : 1; + uint32_t reserved : 27; + } bs; + uint32_t value; +}; + +union urma_tp_type_cap { + struct { + uint32_t rtp : 1; + uint32_t ctp : 1; + uint32_t utp : 1; + uint32_t reserved : 29; + } bs; + uint32_t value; +}; + +union urma_tp_feature { + struct { + uint32_t rm_multi_path : 1; + uint32_t rc_multi_path : 1; + uint32_t reserved : 28; + } bs; + uint32_t value; +}; + +struct ubcore_device_cap { + union ubcore_device_feat feature; + uint32_t max_jfc; + uint32_t max_jfs; + uint32_t max_jfr; + uint32_t max_jetty; + uint32_t max_tp_cnt; + uint32_t max_tpg_cnt; + /* max_vtp_cnt_per_ue * max_ue_cnt Equal to the number of VTPs on the entire card */ + uint32_t max_vtp_cnt_per_ue; + uint32_t max_jetty_grp; + uint32_t max_jetty_in_jetty_grp; + uint32_t max_rc; /* max rc queues */ + uint32_t max_jfc_depth; + uint32_t max_jfs_depth; + uint32_t max_jfr_depth; + uint32_t max_rc_depth; /* max depth of each rc queue */ + uint32_t max_jfs_inline_size; + uint32_t max_jfs_sge; + uint32_t max_jfs_rsge; + uint32_t max_jfr_sge; + uint64_t max_msg_size; + uint32_t max_read_size; + uint32_t max_write_size; + uint32_t max_cas_size; + uint32_t max_swap_size; + uint32_t max_fetch_and_add_size; + uint32_t max_fetch_and_sub_size; + uint32_t max_fetch_and_and_size; + uint32_t max_fetch_and_or_size; + uint32_t max_fetch_and_xor_size; + /* max read command outstanding count in the function entity */ + uint64_t max_rc_outstd_cnt; + uint32_t max_sip_cnt_per_ue; + uint32_t max_dip_cnt_per_ue; + uint32_t max_seid_cnt_per_ue; + uint16_t trans_mode; /* one or more from ubcore_transport_mode_t */ + uint16_t sub_trans_mode_cap; /* one or more from ubcore_sub_trans_mode_cap */ + uint16_t congestion_ctrl_alg; /* one or more mode from ubcore_congestion_ctrl_alg_t */ + uint16_t ceq_cnt; /* completion vector count */ + uint32_t max_tp_in_tpg; + uint32_t max_utp_cnt; + uint32_t max_oor_cnt; /* max OOR window size by packet */ + uint32_t mn; + uint32_t min_slice; + uint32_t max_slice; + union ubcore_atomic_feat atomic_feat; + uint32_t max_eid_cnt; + uint32_t max_upi_cnt; + uint32_t max_netaddr_cnt; + uint16_t max_ue_cnt; /* MUE: greater than or equal to 0; UE: must be 0 */ + uint64_t page_size_cap; + union ubcore_order_type_cap rm_order_cap; + union ubcore_order_type_cap rc_order_cap; + union urma_tp_type_cap rm_tp_cap; + union urma_tp_type_cap rc_tp_cap; + union urma_tp_type_cap um_tp_cap; + union urma_tp_feature tp_feature; +}; + +struct ubcore_guid { + uint8_t raw[UBCORE_GUID_SIZE]; +}; + +struct ubcore_device_attr { + struct ubcore_guid guid; + uint16_t ue_idx; + struct ubcore_device_cap dev_cap; + uint32_t reserved_jetty_id_min; + uint32_t reserved_jetty_id_max; + struct ubcore_port_attr port_attr[UBCORE_MAX_PORT_CNT]; + uint8_t port_cnt; + bool virtualization; /* In VM or not, must set by driver when register device */ + bool tp_maintainer; /* device used to maintain TP resource */ + enum ubcore_pattern pattern; +}; + +union ubcore_device_cfg_mask { + struct { + uint32_t rc_cnt : 1; + uint32_t rc_depth : 1; + uint32_t slice : 1; + uint32_t pattern : 1; + uint32_t virtualization : 1; + uint32_t suspend_period : 1; + uint32_t suspend_cnt : 1; + uint32_t min_jetty_cnt : 1; + uint32_t max_jetty_cnt : 1; + uint32_t min_jfr_cnt : 1; + uint32_t max_jfr_cnt : 
1; + uint32_t reserved_jetty_id_min : 1; + uint32_t reserved_jetty_id_max : 1; + uint32_t reserved : 19; + } bs; + uint32_t value; +}; + +struct ubcore_congestion_control { + uint32_t data; +}; + +struct ubcore_rc_cfg { + uint32_t rc_cnt; /* rc queue count */ + uint32_t depth; +}; + +struct ubcore_device_cfg { + uint16_t ue_idx; /* ue id or mue id. e.g: bdf id */ + union ubcore_device_cfg_mask mask; + struct ubcore_rc_cfg rc_cfg; + uint32_t slice; /* TA slice size byte */ + uint8_t pattern; /* 0: pattern1; 1: pattern3 */ + bool virtualization; + uint32_t suspend_period; /* us */ + uint32_t suspend_cnt; /* TP resend cnt */ + uint32_t min_jetty_cnt; + uint32_t max_jetty_cnt; + uint32_t min_jfr_cnt; + uint32_t max_jfr_cnt; + uint32_t reserved_jetty_id_min; + uint32_t reserved_jetty_id_max; +}; + +/* struct [struct ubcore_user_ctl_in] should be consistent with [urma_user_ctl_in_t] */ +struct ubcore_user_ctl_in { + uint64_t addr; + uint32_t len; + uint32_t opcode; +}; + +/* struct [struct ubcore_user_ctl_out] should be consistent with [urma_user_ctl_out_t] */ +struct ubcore_user_ctl_out { + uint64_t addr; + uint32_t len; + uint32_t reserved; +}; + +struct ubcore_user_ctl { + struct ubcore_ucontext *uctx; + struct ubcore_user_ctl_in in; + struct ubcore_user_ctl_out out; + struct ubcore_udrv_priv udrv_data; +}; + +enum ubcore_net_addr_type { + UBCORE_NET_ADDR_TYPE_IPV4 = 0, + UBCORE_NET_ADDR_TYPE_IPV6 +}; + +union ubcore_net_addr_union { + uint8_t raw[UBCORE_NET_ADDR_BYTES]; + struct { + uint64_t reserved1; + uint32_t reserved2; + uint32_t addr; + } in4; + struct { + uint64_t subnet_prefix; + uint64_t interface_id; + } in6; +}; + +struct ubcore_net_addr { + enum ubcore_net_addr_type type; + union ubcore_net_addr_union net_addr; + uint64_t vlan; /* available for UBOE */ + uint8_t mac[UBCORE_MAC_BYTES]; /* available for UBOE */ + uint32_t prefix_len; +}; + +union ubcore_tp_cfg_flag { + struct { + uint32_t target : 1; /* 0: initiator, 1: target */ + uint32_t loopback : 1; + uint32_t ack_resp : 1; + uint32_t dca_enable : 1; + /* for the bonding case, the hardware selects the port + * ignoring the port of the tp context and + * selects the port based on the hash value + * along with the information in the bonding group table. 
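/*
 * Minimal configuration sketch (editorial, not part of the patch): using
 * ubcore_device_cfg_mask so that only the RC queue fields of
 * struct ubcore_device_cfg are applied; the counts are placeholders.
 */
static void example_fill_rc_cfg(struct ubcore_device_cfg *cfg, uint16_t ue_idx)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->ue_idx = ue_idx;
	cfg->mask.bs.rc_cnt = 1;
	cfg->mask.bs.rc_depth = 1;
	cfg->rc_cfg.rc_cnt = 128;
	cfg->rc_cfg.depth = 1024;
}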
+ */ + uint32_t bonding : 1; + uint32_t reserved : 27; + } bs; + uint32_t value; +}; + +union ubcore_tp_mod_flag { + struct { + uint32_t oor_en : 1; /* out of order receive, 0: disable 1: enable */ + uint32_t sr_en : 1; /* selective retransmission, 0: disable 1: enable */ + uint32_t cc_en : 1; /* congestion control algorithm, 0: disable 1: enable */ + uint32_t cc_alg : 4; /* The value is ubcore_tp_cc_alg_t */ + uint32_t spray_en : 1; /* spray with src udp port, 0: disable 1: enable */ + uint32_t clan : 1; /* clan domain, 0: disable 1: enable */ + uint32_t reserved : 23; + } bs; + uint32_t value; +}; + +/* The first bits must be consistent with union ubcore_tp_cfg_flag */ +union ubcore_tp_flag { + struct { + uint32_t target : 1; /* 0: initiator, 1: target */ + uint32_t oor_en : 1; /* out of order receive, 0: disable 1: enable */ + uint32_t sr_en : 1; /* selective retransmission, 0: disable 1: enable */ + uint32_t cc_en : 1; /* congestion control algorithm, 0: disable 1: enable */ + uint32_t cc_alg : 4; /* The value is ubcore_tp_cc_alg_t */ + uint32_t spray_en : 1; /* spray with src udp port, 0: disable 1: enable */ + uint32_t loopback : 1; + uint32_t ack_resp : 1; + uint32_t dca_enable : 1; /* dynamic connection, 0: disable 1: enable */ + uint32_t bonding : 1; + uint32_t clan : 1; + uint32_t reserved : 18; + } bs; + uint32_t value; +}; + +enum ubcore_tp_state { + UBCORE_TP_STATE_RESET = 0, + UBCORE_TP_STATE_RTR, + UBCORE_TP_STATE_RTS, + UBCORE_TP_STATE_SUSPENDED, + UBCORE_TP_STATE_ERR +}; + +enum ubcore_ta_type { + UBCORE_TA_NONE = 0, + UBCORE_TA_JFS_TJFR, + UBCORE_TA_JETTY_TJETTY, + UBCORE_TA_VIRT /* virtualization */ +}; + +struct ubcore_ta { + enum ubcore_ta_type type; + union { + struct ubcore_jfs *jfs; + struct ubcore_jfr *jfr; + struct ubcore_jetty *jetty; + }; + struct ubcore_jetty_id tjetty_id; /* peer jetty id */ +}; + +struct ubcore_tpg; +struct ubcore_tp_cfg { + union ubcore_tp_cfg_flag flag; /* flag of initial tp */ + /* transaction layer attributes */ + union { + union ubcore_eid local_eid; + struct ubcore_jetty_id local_jetty; + }; + uint16_t ue_idx; /* rc mode only */ + union { + union ubcore_eid peer_eid; + struct ubcore_jetty_id peer_jetty; + }; + /* transport layer attributes */ + enum ubcore_transport_mode trans_mode; + uint8_t retry_num; + uint8_t retry_factor; /* for calculate the time slot to retry */ + uint8_t ack_timeout; + uint8_t dscp; + uint32_t oor_cnt; /* OOR window size: by packet */ + struct ubcore_tpg *tpg; /* NULL if no tpg, eg.UM mode */ +}; + +struct ubcore_tp_ext { + uint64_t addr; + uint32_t len; +}; + +union ubcore_tp_attr_mask { + struct { + uint32_t flag : 1; + uint32_t peer_tpn : 1; + uint32_t state : 1; + uint32_t tx_psn : 1; + uint32_t rx_psn : 1; /* modify both rx psn and tx psn when restore tp */ + uint32_t mtu : 1; + uint32_t cc_pattern_idx : 1; + uint32_t peer_ext : 1; + uint32_t oos_cnt : 1; + uint32_t local_net_addr_idx : 1; + uint32_t peer_net_addr : 1; + uint32_t data_udp_start : 1; + uint32_t ack_udp_start : 1; + uint32_t udp_range : 1; + uint32_t hop_limit : 1; + uint32_t flow_label : 1; + uint32_t port_id : 1; + uint32_t mn : 1; + uint32_t peer_trans_type : 1; /* user tp only */ + uint32_t reserved : 13; + } bs; + uint32_t value; +}; + +struct ubcore_tp_attr { + union ubcore_tp_mod_flag flag; + uint32_t peer_tpn; + enum ubcore_tp_state state; + uint32_t tx_psn; + uint32_t rx_psn; + enum ubcore_mtu mtu; + uint8_t cc_pattern_idx; + struct ubcore_tp_ext peer_ext; + uint32_t oos_cnt; /* out of standing packet cnt */ + uint32_t 
local_net_addr_idx; + struct ubcore_net_addr peer_net_addr; + uint16_t data_udp_start; + uint16_t ack_udp_start; + uint8_t udp_range; + uint8_t hop_limit; + uint32_t flow_label; + uint8_t port_id; + uint8_t mn; /* 0~15, a packet contains only one msg if mn is set as 0 */ + enum ubcore_transport_type + peer_trans_type; /* Only for user tp connection */ +}; + +struct ubcore_tp { + uint32_t tpn; /* driver assigned in creating tp */ + uint32_t peer_tpn; + struct ubcore_device *ub_dev; + union ubcore_tp_flag flag; /* indicate initiator or target, etc */ + uint32_t local_net_addr_idx; + struct ubcore_net_addr peer_net_addr; + /* only for RC START */ + union { + union ubcore_eid local_eid; + struct ubcore_jetty_id local_jetty; + }; + union { + union ubcore_eid peer_eid; + struct ubcore_jetty_id peer_jetty; + }; + /* only for RC END */ + enum ubcore_transport_mode trans_mode; + enum ubcore_tp_state state; + uint32_t rx_psn; + uint32_t tx_psn; + enum ubcore_mtu mtu; + uint16_t data_udp_start; /* src udp port start, for multipath data */ + uint16_t ack_udp_start; /* src udp port start, for multipath ack */ + uint8_t udp_range; /* src udp port range, for both multipath data and ack */ + uint8_t port_id; /* optional, physical port, only for non-bonding */ + uint8_t retry_num; + uint8_t retry_factor; + uint8_t ack_timeout; + uint8_t dscp; + uint8_t cc_pattern_idx; + uint8_t hop_limit; + struct ubcore_tpg *tpg; /* NULL if no tpg, eg. UM mode */ + uint32_t oor_cnt; /* out of order window size for recv: packet cnt */ + uint32_t oos_cnt; /* out of order window size for send: packet cnt */ + struct ubcore_tp_ext tp_ext; /* driver fill in creating tp */ + struct ubcore_tp_ext peer_ext; /* ubcore fill before modifying tp */ + atomic_t use_cnt; + struct hlist_node hnode; /* driver inaccessible */ + struct kref ref_cnt; + struct completion comp; + uint32_t flow_label; + uint8_t mn; /* 0~15, a packet contains only one msg if mn is set as 0 */ + enum ubcore_transport_type + peer_trans_type; /* Only for user tp connection */ + struct mutex lock; /* protect TP state */ + void *priv; /* ubcore private data for tp management */ + uint32_t ue_idx; +}; + +struct ubcore_tpg_cfg { + /* transaction layer attributes */ + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + + /* tranport layer attributes */ + enum ubcore_transport_mode trans_mode; + uint8_t dscp; + enum ubcore_tp_cc_alg cc_alg; + uint8_t cc_pattern_idx; + uint32_t tp_cnt; + struct ubcore_net_addr local_net_addr; +}; + +struct ubcore_tpg_ext { + uint64_t addr; + uint32_t len; +}; + +struct ubcore_tpg { + uint32_t tpgn; + struct ubcore_device *ub_dev; + struct ubcore_tpg_cfg tpg_cfg; /* filled by ubcore when creating tp */ + struct ubcore_tpg_ext tpg_ext; /* filled by ubn driver when creating tp */ + struct ubcore_tpg_ext peer_ext; /* filled by ubcore before modifying tp */ + struct ubcore_tp * + tp_list[UBCORE_MAX_TP_CNT_IN_GRP]; // UBCORE_MAX_TP_CNT_IN_GRP=32 + struct hlist_node hnode; /* driver inaccessible */ + struct kref ref_cnt; + struct mutex mutex; + uint32_t ue_idx; + uint32_t peer_tpgn; // Only for tpg audit with peer, driver inaccessible +}; + +struct ubcore_cc_entry { + enum ubcore_tp_cc_alg alg; + uint8_t cc_pattern_idx; + uint8_t cc_priority; +} __packed; + +union ubcore_utp_cfg_flag { + struct { + uint32_t loopback : 1; + uint32_t spray_en : 1; + uint32_t clan : 1; + uint32_t reserved : 29; + } bs; + uint32_t value; +}; + +struct ubcore_utp_cfg { + /* transaction layer attributes */ + union ubcore_utp_cfg_flag flag; + uint16_t udp_start; 
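/*
 * Minimal sketch (editorial, not part of the patch): a TP modification that
 * touches only the state and both PSNs, selected through
 * union ubcore_tp_attr_mask. The PSN values are placeholders.
 */
static void example_fill_tp_mod(struct ubcore_tp_attr *attr,
				union ubcore_tp_attr_mask *mask)
{
	memset(attr, 0, sizeof(*attr));
	mask->value = 0;
	mask->bs.state = 1;
	mask->bs.tx_psn = 1;
	mask->bs.rx_psn = 1;
	attr->state = UBCORE_TP_STATE_RTR;
	attr->tx_psn = 0x1000;
	attr->rx_psn = 0x1000;
}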
// src udp port start + uint8_t udp_range; // src udp port range + uint32_t local_net_addr_idx; + struct ubcore_net_addr local_net_addr; + struct ubcore_net_addr peer_net_addr; + uint32_t flow_label; + uint8_t dscp; + uint8_t hop_limit; + uint32_t port_id; + enum ubcore_mtu mtu; +}; + +struct ubcore_utp { + uint32_t utpn; /* driver fills */ + struct ubcore_device *ub_dev; + struct ubcore_utp_cfg utp_cfg; /* filled by ubcore when creating utp. */ + struct hlist_node hnode; + struct kref ref_cnt; + uint32_t ue_idx; +}; + +struct ubcore_ctp_cfg { + struct ubcore_net_addr peer_net_addr; + uint32_t cna_len; +}; + +struct ubcore_ctp { + uint32_t ctpn; /* driver fills */ + struct ubcore_device *ub_dev; + struct ubcore_ctp_cfg ctp_cfg; /* filled by ubcore when creating ctp. */ + atomic_t use_cnt; + struct hlist_node hnode; + struct kref ref_cnt; + struct completion comp; + uint32_t ue_idx; +}; + +enum ubcore_vtp_state { + UBCORE_VTPS_RESET = 0, + UBCORE_VTPS_READY = 1, + UBCORE_VTPS_WAIT_DESTROY = 2, +}; + +// for live migration +enum ubcore_vtp_node_state { + STATE_NORMAL = 0, + STATE_READY, + STATE_MIGRATING, +}; + +#define UBCORE_VTPS_DELETED 2 + +struct ubcore_vtpn { + uint32_t vtpn; /* driver fills */ + struct ubcore_device *ub_dev; + /* ubcore private, inaccessible to driver */ + enum ubcore_transport_mode trans_mode; + /* vtpn key start */ + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + uint32_t local_jetty; + uint32_t peer_jetty; + /* vtpn key end */ + uint32_t eid_index; + struct mutex state_lock; + enum ubcore_vtp_state state; /* protected by state_lock */ + struct hlist_node hnode; /* key: eid + jetty */ + struct hlist_node vtpn_hnode; /* key: vtpn */ + atomic_t use_cnt; + struct kref ref_cnt; + struct completion comp; + struct list_head node; /* vtpn node in vtpn_wait_list */ + struct list_head list; /* vtpn head to restore tjetty/jetty/cb node */ + struct list_head + disconnect_list; /* vtpn head to restore disconnect vtpn node */ + uint64_t tp_handle; + uint64_t peer_tp_handle; + uint64_t tag; + bool uspace; /* true: user space; false: kernel space */ +}; + +union ubcore_vtp_cfg_flag { + struct { + uint32_t clan_tp : 1; + uint32_t migrate : 1; + uint32_t reserve : 30; + } bs; + uint32_t value; +}; + +struct ubcore_vice_tpg_info { + struct ubcore_tpg *vice_tpg; + enum ubcore_vtp_node_state node_state; + uint32_t vice_role; +}; + +struct ubcore_vtp_cfg { + uint16_t ue_idx; // ueid or mueid + uint32_t vtpn; + uint32_t local_jetty; + /* key start */ + union ubcore_eid local_eid; + union ubcore_eid peer_eid; + uint32_t peer_jetty; + /* key end */ + union ubcore_vtp_cfg_flag flag; + enum ubcore_transport_mode trans_mode; + union { + struct ubcore_tpg *tpg; + struct ubcore_tp *tp; + struct ubcore_utp *utp; // idx of dip + struct ubcore_ctp *ctp; /* valid when clan is true */ + }; +}; + +struct ubcore_vtp { + struct ubcore_device *ub_dev; + struct ubcore_vtp_cfg cfg; /* driver fills */ + struct ubcore_vice_tpg_info vice_tpg_info; + struct hlist_node hnode; /* driver inaccessible */ + uint32_t role; /* current side is initiator, target or duplex */ + struct kref ref_cnt; + uint32_t eid_idx; + uint32_t upi; + bool share_mode; +}; + +struct ubcore_vtp_attr { + union { + struct ubcore_tpg *tpg; + struct ubcore_tp *tp; + struct ubcore_utp *utp; // idx of dip + struct ubcore_ctp *ctp; /* clan domain */ + } tp; +}; + +union ubcore_vtp_attr_mask { + struct { + uint32_t tp : 1; + uint32_t reserved : 31; + } bs; + uint32_t value; +}; + +enum ubcore_msg_opcode { + /* 630 Version msg start */ + 
UBCORE_MSG_CREATE_VTP = 0x0, + UBCORE_MSG_DESTROY_VTP = 0x1, + UBCORE_MSG_ALLOC_EID = 0x2, + UBCORE_MSG_DEALLOC_EID = 0x3, + UBCORE_MSG_CONFIG_DEVICE = 0x4, + UBCORE_MSG_VTP_STATUS_NOTIFY = 0x5, // MUE notify MUE/UE + UBCORE_MSG_UPDATE_EID_TABLE_NOTIFY = 0x6, // MUE notify MUE/UE + UBCORE_MSG_UE2MUE_TRANSFER = 0x7, // UE-MUE common transfer + UBCORE_MSG_STOP_PROC_VTP_MSG = 0x10, // Live migration + UBCORE_MSG_QUERY_VTP_MIG_STATUS = 0x11, // Live migration + UBCORE_MSG_FLOW_STOPPED = 0x12, // Live migration + UBCORE_MSG_MIG_ROLLBACK = 0x13, // Live migration + UBCORE_MSG_MIG_VM_START = 0x14, // Live migration + UBCORE_MSG_NEGO_VER = + 0x15, // Verion negotiation, processed by backend ubcore. + UBCORE_MSG_NOTIFY_FASTMSG_DRAIN = 0x16, + UBCORE_MSG_UPDATE_NET_ADDR = 0x17, + UBCORE_MSP_UPDATE_EID = 0x18 +}; + +struct ubcore_req { + uint32_t msg_id; + enum ubcore_msg_opcode opcode; + uint32_t len; + uint8_t data[]; +}; + +struct ubcore_req_host { + uint16_t src_ue_idx; + struct ubcore_req req; +}; + +struct ubcore_resp { + uint32_t msg_id; + enum ubcore_msg_opcode opcode; + uint32_t len; + uint8_t data[]; +}; + +struct ubcore_resp_host { + uint16_t dst_ue_idx; + struct ubcore_resp resp; +}; + +struct ubcore_event { + struct ubcore_device *ub_dev; + union { + struct ubcore_jfc *jfc; + struct ubcore_jfs *jfs; + struct ubcore_jfr *jfr; + struct ubcore_jetty *jetty; + struct ubcore_jetty_group *jetty_grp; + struct ubcore_tp *tp; + struct ubcore_vtp *vtp; + uint32_t port_id; + uint32_t eid_idx; + } element; + enum ubcore_event_type event_type; +}; + +typedef void (*ubcore_event_callback_t)(struct ubcore_event *event, + struct ubcore_ucontext *ctx); + +struct ubcore_event_handler { + void (*event_callback)(struct ubcore_event *event, + struct ubcore_event_handler *handler); + struct list_head node; +}; + +typedef void (*ubcore_comp_callback_t)(struct ubcore_jfc *jfc); + +struct ubcore_jfc_cfg { + uint32_t depth; + union ubcore_jfc_flag flag; + uint32_t ceqn; + void *jfc_context; +}; + +struct ubcore_jfc { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jfc_cfg jfc_cfg; + uint32_t id; /* allocated by driver */ + ubcore_comp_callback_t jfce_handler; + ubcore_event_callback_t jfae_handler; + uint64_t urma_jfc; /* user space jfc pointer */ + struct hlist_node hnode; + atomic_t use_cnt; +}; + +struct ubcore_jfs_cfg { + uint32_t depth; + union ubcore_jfs_flag flag; + enum ubcore_transport_mode trans_mode; + uint32_t eid_index; + uint8_t priority; + uint8_t max_sge; + uint8_t max_rsge; + uint32_t max_inline_data; + uint8_t rnr_retry; + uint8_t err_timeout; + void *jfs_context; + struct ubcore_jfc *jfc; +}; + +struct ubcore_jfs { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jfs_cfg jfs_cfg; + struct ubcore_jetty_id jfs_id; /* driver fill jfs_id->id */ + ubcore_event_callback_t jfae_handler; + uint64_t urma_jfs; /* user space jfs pointer */ + struct hlist_node hnode; + atomic_t use_cnt; + struct kref ref_cnt; + struct completion comp; + struct ubcore_hash_table + *tptable; /* Only for devices not natively supporting RM mode */ +}; + +struct ubcore_jfr_cfg { + uint32_t id; /* user may assign id */ + uint32_t depth; + union ubcore_jfr_flag flag; + enum ubcore_transport_mode trans_mode; + uint32_t eid_index; + uint8_t max_sge; + uint8_t min_rnr_timer; + struct ubcore_token token_value; + struct ubcore_jfc *jfc; + void *jfr_context; +}; + +struct ubcore_jfr { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jfr_cfg 
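/*
 * Minimal sketch (editorial, not part of the patch): building a UE-to-MUE
 * request around the flexible data[] array of struct ubcore_req. The payload
 * layout and msg_id are hypothetical; kzalloc() and memcpy() come from
 * <linux/slab.h> and <linux/string.h>.
 */
static struct ubcore_req *example_build_req(uint32_t msg_id,
					    const void *payload, uint32_t len)
{
	struct ubcore_req *req = kzalloc(sizeof(*req) + len, GFP_KERNEL);

	if (!req)
		return NULL;
	req->msg_id = msg_id;
	req->opcode = UBCORE_MSG_UE2MUE_TRANSFER;
	req->len = len;
	memcpy(req->data, payload, len);
	return req;
}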
jfr_cfg; + struct ubcore_jetty_id jfr_id; /* driver fill jfr_id->id */ + ubcore_event_callback_t jfae_handler; + uint64_t urma_jfr; /* user space jfr pointer */ + struct hlist_node hnode; + atomic_t use_cnt; + struct kref ref_cnt; + struct completion comp; + struct ubcore_hash_table + *tptable; /* Only for devices not natively supporting RM mode */ +}; + +union ubcore_jetty_flag { + struct { + uint32_t share_jfr : 1; /* 0: URMA_NO_SHARE_JFR. 1: URMA_SHARE_JFR. */ + uint32_t lock_free : 1; + uint32_t error_suspend : 1; + uint32_t outorder_comp : 1; + uint32_t order_type : 8; /* (0x0): default, auto config by driver */ + /* (0x1): OT, target ordering */ + /* (0x2): OI, initiator ordering */ + /* (0x3): OL, low layer ordering */ + /* (0x4): UNO, unreliable non ordering */ + uint32_t multi_path : 1; + uint32_t reserved : 19; + } bs; + uint32_t value; +}; + +struct ubcore_jetty_cfg { + uint32_t id; /* user may assign id */ + union ubcore_jetty_flag flag; + enum ubcore_transport_mode trans_mode; + uint32_t eid_index; + uint32_t jfs_depth; + uint8_t priority; + uint8_t max_send_sge; + uint8_t max_send_rsge; + uint32_t max_inline_data; + uint8_t rnr_retry; + uint8_t err_timeout; + uint32_t jfr_depth; /* deprecated */ + uint8_t min_rnr_timer; /* deprecated */ + uint8_t max_recv_sge; /* deprecated */ + struct ubcore_token token_value; /* deprecated */ + struct ubcore_jfc *send_jfc; + struct ubcore_jfc *recv_jfc; /* must set */ + struct ubcore_jfr *jfr; /* must set, shared jfr */ + struct ubcore_jetty_group + *jetty_grp; /* [Optional] user specified jetty group */ + void *jetty_context; +}; + +union ubcore_import_jetty_flag { + struct { + uint32_t token_policy : 3; + uint32_t order_type : 8; /* (0x0): default, auto config by driver */ + /* (0x1): OT, target ordering */ + /* (0x2): OI, initiator ordering */ + /* (0x3): OL, low layer ordering */ + /* (0x4): UNO, unreliable non ordering */ + uint32_t share_tp : 1; + uint32_t reserved : 20; + } bs; + uint32_t value; +}; + +enum ubcore_tp_type { UBCORE_RTP, UBCORE_CTP, UBCORE_UTP }; + +struct ubcore_tjetty_cfg { + struct ubcore_jetty_id + id; /* jfr, jetty or jetty group id to be imported */ + union ubcore_import_jetty_flag flag; + enum ubcore_transport_mode trans_mode; + uint32_t eid_index; + enum ubcore_target_type type; + enum ubcore_jetty_grp_policy policy; + struct ubcore_token + token_value; /* jfr, jetty or jetty group token_value to be imported */ + enum ubcore_tp_type tp_type; +}; + +struct ubcore_tjetty { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_tjetty_cfg cfg; + struct ubcore_tp *tp; + struct ubcore_vtpn *vtpn; + atomic_t use_cnt; + struct mutex lock; +}; + +struct ubcore_jetty { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jetty_cfg jetty_cfg; + struct ubcore_jetty_id jetty_id; /* driver fill jetty_id->id */ + struct ubcore_tjetty *remote_jetty; // bind to remote jetty + ubcore_event_callback_t jfae_handler; + uint64_t urma_jetty; /* user space jetty pointer */ + struct hlist_node hnode; + atomic_t use_cnt; + struct kref ref_cnt; + struct completion comp; + struct ubcore_hash_table + *tptable; /* Only for devices not natively supporting RM mode */ +}; + +union ubcore_jetty_grp_flag { + struct { + uint32_t token_policy : 3; + uint32_t reserved : 29; + } bs; + uint32_t value; +}; + +struct ubcore_jetty_grp_cfg { + char name[UBCORE_JETTY_GRP_MAX_NAME]; + uint32_t eid_index; + union ubcore_jetty_grp_flag flag; + struct ubcore_token token_value; + uint32_t id; + enum 
ubcore_jetty_grp_policy policy; + uint64_t user_ctx; +}; + +struct ubcore_jetty_group { + struct ubcore_device *ub_dev; + struct ubcore_ucontext *uctx; + struct ubcore_jetty_grp_cfg jetty_grp_cfg; + struct ubcore_jetty_id jetty_grp_id; /* driver fill jetty_grp_id->id */ + uint32_t jetty_cnt; /* current jetty cnt in the jetty group */ + struct ubcore_jetty **jetty; + ubcore_event_callback_t jfae_handler; + uint64_t urma_jetty_grp; /* user space jetty_grp pointer */ + struct mutex lock; /* Protect jetty array */ +}; + +struct ubcore_import_cb { + void *user_arg; /* uburma_tjetty */ + void (*callback)(struct ubcore_tjetty *tjetty, int status, + void *user_arg); +}; + +struct ubcore_unimport_cb { + void *user_arg; + void (*callback)(int status, void *user_arg); +}; + +struct ubcore_bind_cb { + void *user_arg; /* uburma_tjetty */ + void (*callback)(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, int status, + void *user_arg); +}; + +struct ubcore_unbind_cb { + void *user_arg; + void (*callback)(int status, void *user_arg); +}; + +enum ubcore_tjetty_type { + UBCORE_IMPORT_JETTY_VTPN = 0, + UBCORE_BIND_JETTY_VTPN, + UBCORE_UNIMPORT_JETTY_VTPN, + UBCORE_UNBIND_JETTY_VTPN +}; + +struct ubcore_vtpn_wait_node { + struct list_head node; + enum ubcore_tjetty_type type; + struct ubcore_tjetty *tjetty; + struct ubcore_jetty *jetty; + struct ubcore_import_cb *import_cb; + struct ubcore_bind_cb *bind_cb; +}; + +struct ubcore_disconnect_vtpn_wait_node { + struct list_head node; + enum ubcore_tjetty_type type; + struct ubcore_unimport_cb *unimport_cb; + struct ubcore_unbind_cb *unbind_cb; +}; + +struct ubcore_wait_vtpn_para { + enum ubcore_tjetty_type type; + struct ubcore_tjetty *tjetty; + struct ubcore_jetty *jetty; + struct ubcore_import_cb *import_cb; + struct ubcore_bind_cb *bind_cb; +}; + +enum ubcore_res_key_type { + UBCORE_RES_KEY_VTP = 1, // key id: VTPN + UBCORE_RES_KEY_TP, // key id: TPN + UBCORE_RES_KEY_TPG, // key id: TPGN + UBCORE_RES_KEY_UTP, // key id: UTP ID + UBCORE_RES_KEY_JFS, // key id: JFS ID + UBCORE_RES_KEY_JFR, // key id: JFR ID + UBCORE_RES_KEY_JETTY, // key id: JETTY ID + UBCORE_RES_KEY_JETTY_GROUP, // key id: JETTY GROUP ID + UBCORE_RES_KEY_JFC, // key id: JFC ID + UBCORE_RES_KEY_RC, // key id: RC ID + UBCORE_RES_KEY_SEG, // key id: TOKEN ID + UBCORE_RES_KEY_DEV_TA, // key id: EID + UBCORE_RES_KEY_DEV_TP // key id: EID +}; + +struct ubcore_res_upi_val { + uint32_t upi; +}; + +struct ubcore_res_vtp_val { + uint16_t ue_idx; + uint32_t vtpn; + union ubcore_eid local_eid; + uint32_t local_jetty; + union ubcore_eid peer_eid; + uint32_t peer_jetty; + union ubcore_vtp_cfg_flag flag; + enum ubcore_transport_mode trans_mode; + union { + uint32_t tpgn; + uint32_t tpn; + uint32_t utpn; + uint32_t ctpn; + }; +}; + +struct ubcore_res_tp_val { + uint32_t tpn; + uint32_t tx_psn; + uint32_t rx_psn; + uint8_t dscp; + uint8_t oor_en; + uint8_t selective_retrans_en; + uint8_t state; + uint16_t data_udp_start; + uint16_t ack_udp_start; + uint8_t udp_range; + uint32_t spray_en; +}; + +struct ubcore_res_tpg_val { + uint32_t tp_cnt; + uint8_t dscp; + uint32_t *tp_list; +}; + +struct ubcore_res_utp_val { + uint32_t utpn; + uint16_t data_udp_start; + uint8_t udp_range; + union ubcore_utp_cfg_flag flag; +}; + +struct ubcore_res_jfs_val { + uint32_t jfs_id; + uint8_t state; + uint32_t depth; + uint8_t priority; + uint32_t jfc_id; +}; + +struct ubcore_res_jfr_val { + uint32_t jfr_id; + uint8_t state; + uint32_t depth; + uint32_t jfc_id; +}; + +struct ubcore_res_jetty_val { + uint32_t jetty_id; + 
uint32_t send_jfc_id; + uint32_t recv_jfc_id; + uint32_t jfr_id; + uint32_t jfs_depth; + uint8_t state; + uint8_t priority; +}; + +struct ubcore_res_jetty_group_val { + uint32_t jetty_cnt; + uint32_t *jetty_list; +}; + +struct ubcore_res_jfc_val { + uint32_t jfc_id; + uint8_t state; + uint32_t depth; +}; + +struct ubcore_res_rc_val { + uint32_t type; // type of rc; read, ta-ack/ta-nak or atomic etc. + uint32_t rc_id; + uint16_t depth; + uint8_t state; +}; + +struct ubcore_seg_info { + struct ubcore_ubva ubva; + uint64_t len; + uint32_t token_id; +}; + +struct ubcore_res_seg_val { + uint32_t seg_cnt; + struct ubcore_seg_info *seg_list; +}; + +struct ubcore_res_list_val { + uint32_t cnt; + uint32_t *list; +}; + +struct ubcore_res_dev_ta_val { + uint32_t seg_cnt; + uint32_t jfs_cnt; + uint32_t jfr_cnt; + uint32_t jfc_cnt; + uint32_t jetty_cnt; + uint32_t jetty_group_cnt; + uint32_t rc_cnt; +}; + +struct ubcore_res_dev_tp_val { + uint32_t vtp_cnt; + uint32_t tp_cnt; + uint32_t tpg_cnt; + uint32_t utp_cnt; +}; + +struct ubcore_res_key { + uint8_t type; /* refer to ubcore_res_key_type_t */ + uint32_t key; /* as UPI, key is ue_idx */ + uint32_t key_ext; /* only for vtp */ + uint32_t key_cnt; /* only for rc */ +}; + +struct ubcore_res_val { + uint64_t addr; /* allocated and freed by ubcore */ + /* in&out. As an input parameter, + * it indicates the length allocated by ubcore. + * As an output parameter, it indicates the actual data length. + */ + uint32_t len; +}; + +union ubcore_jfs_wr_flag { + struct { + /* 0: There is no order with other WR. + * 1: relaxed order. + * 2: strong order. + * 3: reserved. + */ + uint32_t place_order : 2; + /* 0: There is no completion order with other WR + * 1: Completion order with previous WR. + */ + uint32_t comp_order : 1; + /* 0: There is no fence. + * 1: Fence with previous read and atomic WR + */ + uint32_t fence : 1; + /* 0: not solicited. + * 1: solicited. It will trigger an event + * on the remote side + */ + uint32_t solicited_enable : 1; + /* 0: Do not notify local process + * after the task is complete. + * 1: Notify local process + * after the task is completed. + */ + uint32_t complete_enable : 1; + /* 0: No inline. + * 1: Inline data. + */ + uint32_t inline_flag : 1; + uint32_t reserved : 25; + } bs; + uint32_t value; +}; + +struct ubcore_sge { + uint64_t addr; + uint32_t len; + struct ubcore_target_seg *tseg; +}; + +struct ubcore_sg { + struct ubcore_sge *sge; + uint32_t num_sge; +}; + +struct ubcore_rw_wr { + struct ubcore_sg src; + struct ubcore_sg dst; + uint8_t target_hint; /* hint of jetty in a target jetty group */ + uint64_t notify_data; /* notify data or immediate data in host byte order */ +}; + +struct ubcore_send_wr { + struct ubcore_sg src; + uint8_t target_hint; /* hint of jetty in a target jetty group */ + uint64_t imm_data; /* immediate data in host byte order */ + struct ubcore_target_seg *tseg; /* Used only when send with invalidate */ +}; + +struct ubcore_cas_wr { + struct ubcore_sge *dst; /* len is the data length of CAS operation */ + struct ubcore_sge * + src; /* Local address for destination original value written back */ + union { + uint64_t cmp_data; /* When the len <= 8B, it indicates the CMP value. */ + uint64_t cmp_addr; /* When the len > 8B, it indicates the data address. */ + }; + union { + /* If destination value is the same as cmp_data, + * destination value will be changed to swap_data. 
+ */ + uint64_t swap_data; + uint64_t swap_addr; + }; +}; + +struct ubcore_faa_wr { + struct ubcore_sge + *dst; /* len in the sge is the length of faa at remote side */ + struct ubcore_sge * + src; /* Local address for destination original value written back */ + union { + uint64_t operand; /* Addend */ + uint64_t operand_addr; + }; +}; + +struct ubcore_jfs_wr { + enum ubcore_opcode opcode; + union ubcore_jfs_wr_flag flag; + uint64_t user_ctx; + struct ubcore_tjetty *tjetty; + union { + struct ubcore_rw_wr rw; + struct ubcore_send_wr send; + struct ubcore_cas_wr cas; + struct ubcore_faa_wr faa; + }; + struct ubcore_jfs_wr *next; +}; + +struct ubcore_jfr_wr { + struct ubcore_sg src; + uint64_t user_ctx; + struct ubcore_jfr_wr *next; +}; + +union ubcore_cr_flag { + struct { + uint8_t s_r : 1; /* Indicate CR stands for sending or receiving */ + uint8_t jetty : 1; /* Indicate id in the CR stands for jetty or JFS/JFR */ + uint8_t suspend_done : 1; + uint8_t flush_err_done : 1; + uint8_t reserved : 4; + } bs; + uint8_t value; +}; + +struct ubcore_cr_token { + uint32_t token_id; + struct ubcore_token token_value; +}; + +struct ubcore_cr { + enum ubcore_cr_status status; + uint64_t user_ctx; + enum ubcore_cr_opcode opcode; + union ubcore_cr_flag flag; + uint32_t completion_len; /* The number of bytes transferred */ + uint32_t local_id; /* Local jetty ID, or JFS ID, or JFR ID, depending on flag */ + /* Valid only for receiving CR. The remote jetty where received msg + * comes from, may be jetty ID or JFS ID, depending on flag. + */ + struct ubcore_jetty_id remote_id; + union { + uint64_t imm_data; /* Valid only for received CR */ + struct ubcore_cr_token invalid_token; + }; + uint32_t tpn; + uintptr_t user_data; /* Use as pointer to local jetty struct */ +}; + +enum ubcore_stats_key_type { + UBCORE_STATS_KEY_VTP = 1, + UBCORE_STATS_KEY_TP = 2, + UBCORE_STATS_KEY_TPG = 3, + UBCORE_STATS_KEY_JFS = 4, + UBCORE_STATS_KEY_JFR = 5, + UBCORE_STATS_KEY_JETTY = 6, + UBCORE_STATS_KEY_JETTY_GROUP = 7, + UBCORE_STATS_KEY_URMA_DEV = 8, +}; + +struct ubcore_stats_key { + uint8_t type; /* stats type, refer to enum ubcore_stats_key_type */ + uint32_t key; /* key can be tpn/tpgn/jetty_id/token_id/ctx_id/etc */ +}; + +struct ubcore_stats_com_val { + uint64_t tx_pkt; + uint64_t rx_pkt; + uint64_t tx_bytes; + uint64_t rx_bytes; + uint64_t tx_pkt_err; + uint64_t rx_pkt_err; +}; + +struct ubcore_stats_val { + /* this addr is alloc and free by ubcore, + * refer to struct ubcore_stats_com_val + */ + uint64_t addr; + /* [in/out] real length filled when success + * to query and buffer length enough; + * expected length filled and return failure when buffer length not enough + */ + uint32_t len; +}; + +union ubcore_utp_mod_flag { + struct { + uint32_t spray_en : 1; // Whether to enable end-side port number hashing, + // 0 : disabled, 1 : enabled + uint32_t reserved : 31; + } bs; + uint32_t value; +}; + +struct ubcore_utp_attr { + union ubcore_utp_mod_flag flag; + uint16_t data_udp_start; + uint8_t udp_range; +}; + +union ubcore_utp_attr_mask { + struct { + uint32_t flag : 1; + uint32_t udp_port : 1; + uint32_t udp_range : 1; + uint32_t reserved : 29; + } bs; + uint32_t value; +}; + +/* live migration struct */ +enum ubcore_mig_state { + UBCORE_MIG_STATE_START, + UBCORE_MIG_STATE_ROLLBACK, + UBCORE_MIG_STATE_FINISH +}; + +enum ubcore_mig_resp_status { + UBCORE_MIG_MSG_PROC_SUCCESS, + UBCORE_MIG_MSG_PROC_FAILURE, + UBCORE_VTP_MIG_COMPLETE, + UBCORE_VTP_MIG_UNCOMPLETE +}; + +struct ubcore_ue_stats { + uint64_t tx_pkt; + 
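/*
 * Minimal sketch (editorial, not part of the patch): a single-SGE WRITE work
 * request composed from the structures above. The SGEs and the imported
 * target jetty are supplied by a hypothetical caller.
 */
static void example_fill_write_wr(struct ubcore_jfs_wr *wr,
				  struct ubcore_sge *src_sge,
				  struct ubcore_sge *dst_sge,
				  struct ubcore_tjetty *tjetty)
{
	memset(wr, 0, sizeof(*wr));
	wr->opcode = UBCORE_OPC_WRITE;
	wr->flag.bs.complete_enable = UBCORE_COMPLETE_ENABLE;
	wr->tjetty = tjetty;
	wr->rw.src.sge = src_sge;
	wr->rw.src.num_sge = 1;
	wr->rw.dst.sge = dst_sge;
	wr->rw.dst.num_sge = 1;
	wr->next = NULL;
}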
uint64_t rx_pkt; + uint64_t tx_bytes; + uint64_t rx_bytes; + uint64_t tx_pkt_err; + uint64_t rx_pkt_err; + uint64_t tx_timeout_cnt; + uint64_t rx_ce_pkt; +}; + +#pragma pack(1) +struct ubcore_tp_attr_value { + uint8_t retry_times_init : 3; + uint8_t at : 5; + uint8_t sip[UBCORE_IP_ADDR_BYTES]; + uint8_t dip[UBCORE_IP_ADDR_BYTES]; + uint8_t sma[UBCORE_MAC_BYTES]; + uint8_t dma[UBCORE_MAC_BYTES]; + uint16_t vlan_id : 12; + uint8_t vlan_en : 1; + uint8_t dscp : 6; + uint8_t at_times : 5; + uint8_t sl : 4; + uint8_t ttl; + uint8_t reserved[78]; +}; +#pragma pack() + +union ubcore_tp_handle { + struct { + uint64_t tpid : 24; + uint64_t tpn_start : 24; + uint64_t tp_cnt : 5; + uint64_t ctp : 1; + uint64_t rtp : 1; + uint64_t utp : 1; + uint64_t uboe : 1; + uint64_t pre_defined : 1; + uint64_t dynamic_defined : 1; + uint64_t reserved : 5; + } bs; + uint64_t value; +}; + +union ubcore_get_tp_cfg_flag { + struct { + uint32_t ctp : 1; + uint32_t rtp : 1; + uint32_t utp : 1; + uint32_t uboe : 1; + uint32_t pre_defined : 1; + uint32_t dynamic_defined : 1; + uint32_t reserved : 26; + } bs; + uint32_t value; +}; + +struct ubcore_get_tp_cfg { + union ubcore_get_tp_cfg_flag flag; + enum ubcore_transport_mode trans_mode; + union ubcore_eid local_eid; + union ubcore_eid peer_eid; +}; + +struct ubcore_tp_info { + union ubcore_tp_handle tp_handle; +}; + +struct ubcore_active_tp_attr { + uint32_t tx_psn; + uint32_t rx_psn; + uint64_t reserved; +}; + +struct ubcore_active_tp_cfg { + union ubcore_tp_handle tp_handle; + union ubcore_tp_handle peer_tp_handle; + uint64_t tag; + struct ubcore_active_tp_attr tp_attr; +}; + +struct ubcore_ops { + struct module *owner; /* kernel driver module */ + char driver_name[UBCORE_MAX_DRIVER_NAME]; /* user space driver name */ + uint32_t abi_version; /* abi version of kernel driver */ + + /** + * add a function entity id (eid) to ub device (for uvs) + * @param[in] dev: the ubcore_device handle; + * @param[in] ue_idx: ue_idx; + * @param[in] cfg: eid and the upi of ue to which the eid belongs can be specified; + * @return: the index of eid/upi, less than 0 indicating error + */ + int (*add_ueid)(struct ubcore_device *dev, uint16_t ue_idx, + struct ubcore_ueid_cfg *cfg); + + /** + * delete a function entity id (eid) to ub device (for uvs) + * @param[in] dev: the ubcore_device handle; + * @param[in] ue_idx: ue_idx; + * @param[in] cfg: eid and the upi of ue to which the eid belongs can be specified; + * @return: 0 on success, other value on error + */ + int (*delete_ueid)(struct ubcore_device *dev, uint16_t ue_idx, + struct ubcore_ueid_cfg *cfg); + + /** + * query device attributes + * @param[in] dev: the ub device handle; + * @param[out] attr: attributes for the driver to fill in + * @return: 0 on success, other value on error + */ + int (*query_device_attr)(struct ubcore_device *dev, + struct ubcore_device_attr *attr); + + /** + * query device status + * @param[in] dev: the ub device handle; + * @param[out] status: status for the driver to fill in + * @return: 0 on success, other value on error + */ + int (*query_device_status)(struct ubcore_device *dev, + struct ubcore_device_status *status); + + /** + * query resource + * @param[in] dev: the ub device handle; + * @param[in] key: resource type and key; + * @param[in/out] val: addr and len of value + * @return: 0 on success, other value on error + */ + int (*query_res)(struct ubcore_device *dev, struct ubcore_res_key *key, + struct ubcore_res_val *val); + + /** + * config device + * @param[in] dev: the ub device handle; + * 
@param[in] cfg: device configuration + * @return: 0 on success, other value on error + */ + int (*config_device)(struct ubcore_device *dev, + struct ubcore_device_cfg *cfg); + + /** + * set ub network address + * @param[in] dev: the ub device handle; + * @param[in] net_addr: net_addr to set + * @param[in] index: index by sip table + * @return: 0 on success, other value on error + */ + int (*add_net_addr)(struct ubcore_device *dev, + struct ubcore_net_addr *net_addr, uint32_t index); + + /** + * unset ub network address + * @param[in] dev: the ub device handle; + * @param[in] idx: net_addr idx by sip table entry + * @return: 0 on success, other value on error + */ + int (*delete_net_addr)(struct ubcore_device *dev, uint32_t idx); + + /** + * allocate a context from ubep for a user process + * @param[in] dev: the ub device handle; + * @param[in] eid: function entity id (eid) index to set; + * @param[in] udrv_data: user space driver data + * @return: pointer to user context on success, null or error, + */ + struct ubcore_ucontext *(*alloc_ucontext)( + struct ubcore_device *dev, uint32_t eid_index, + struct ubcore_udrv_priv *udrv_data); + + /** + * free a context to ubep + * @param[in] uctx: the user context created before; + * @return: 0 on success, other value on error + */ + int (*free_ucontext)(struct ubcore_ucontext *uctx); + + /** + * mmap doorbell or jetty buffer, etc + * @param[in] uctx: the user context created before; + * @param[in] vma: linux vma including vm_start, vm_pgoff, etc; + * @return: 0 on success, other value on error + */ + int (*mmap)(struct ubcore_ucontext *ctx, struct vm_area_struct *vma); + + /* segment part */ + /** alloc token id to ubep + * @param[in] dev: the ub device handle; + * @param[in] flag: token_id_flag; + * @param[in] udata: ucontext and user space driver data + * @return: token id pointer on success, NULL on error + */ + struct ubcore_token_id *(*alloc_token_id)( + struct ubcore_device *dev, union ubcore_token_id_flag flag, + struct ubcore_udata *udata); + + /** free key id from ubep + * @param[in] token_id: the token id alloced before; + * @return: 0 on success, other value on error + */ + int (*free_token_id)(struct ubcore_token_id *token_id); + + /** register segment to ubep + * @param[in] dev: the ub device handle; + * @param[in] cfg: segment attributes and configurations + * @param[in] udata: ucontext and user space driver data + * @return: target segment pointer on success, NULL on error + */ + struct ubcore_target_seg *(*register_seg)(struct ubcore_device *dev, + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata); + + /** unregister segment from ubep + * @param[in] tseg: the segment registered before; + * @return: 0 on success, other value on error + */ + int (*unregister_seg)(struct ubcore_target_seg *tseg); + + /** import a remote segment to ubep + * @param[in] dev: the ub device handle; + * @param[in] cfg: segment attributes and import configurations + * @param[in] udata: ucontext and user space driver data + * @return: target segment handle on success, NULL on error + */ + struct ubcore_target_seg *(*import_seg)( + struct ubcore_device *dev, struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata); + + /** unimport seg from ubep + * @param[in] tseg: the segment imported before; + * @return: 0 on success, other value on error + */ + int (*unimport_seg)(struct ubcore_target_seg *tseg); + + /** add port for bound device + * @param[in] dev: the ub device handle; + * @param[in] port_cnt: port count + * @param[in] port_list: port list 
+ * @return: target segment handle on success, NULL on error + */ + int (*add_port)(struct ubcore_device *dev, uint32_t port_cnt, + uint32_t *port_list); + + /* jetty part */ + /** + * create jfc with ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: jfc attributes and configurations + * @param[in] udata: ucontext and user space driver data + * @return: jfc pointer on success, NULL on error + */ + struct ubcore_jfc *(*create_jfc)(struct ubcore_device *dev, + struct ubcore_jfc_cfg *cfg, + struct ubcore_udata *udata); + + /** + * modify jfc from ubep. + * @param[in] jfc: the jfc created before; + * @param[in] attr: ubcore jfc attr; + * @param[in] udata: ucontext and user space driver data + * @return: 0 on success, other value on error + */ + int (*modify_jfc)(struct ubcore_jfc *jfc, struct ubcore_jfc_attr *attr, + struct ubcore_udata *udata); + + /** + * destroy jfc from ubep. + * @param[in] jfc: the jfc created before; + * @return: 0 on success, other value on error + */ + int (*destroy_jfc)(struct ubcore_jfc *jfc); + + /** + * batch destroy jfc from ubep. + * @param[in] jfc_arr: the jfc array created before; + * @param[in] jfc_num: jfc array length; + * @param[out] bad_jfc_index: when delete err, return jfc index in the array; + * @return: 0 on success, other value on error + */ + int (*destroy_jfc_batch)(struct ubcore_jfc **jfc_arr, int jfc_num, + int *bad_jfc_index); + + /** + * rearm jfc. + * @param[in] jfc: the jfc created before; + * @param[in] solicited_only: rearm notify by message marked with solicited flag + * @return: 0 on success, other value on error + */ + int (*rearm_jfc)(struct ubcore_jfc *jfc, bool solicited_only); + + /** + * create jfs with ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: jfs attributes and configurations + * @param[in] udata: ucontext and user space driver data + * @return: jfs pointer on success, NULL on error + */ + struct ubcore_jfs *(*create_jfs)(struct ubcore_device *dev, + struct ubcore_jfs_cfg *cfg, + struct ubcore_udata *udata); + /** + * modify jfs from ubep. + * @param[in] jfs: the jfs created before; + * @param[in] attr: ubcore jfs attr; + * @param[in] udata: ucontext and user space driver data + * @return: 0 on success, other value on error + */ + int (*modify_jfs)(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, + struct ubcore_udata *udata); + /** + * query jfs from ubep. + * @param[in] jfs: the jfs created before; + * @param[out] cfg: jfs configurations; + * @param[out] attr: ubcore jfs attributes; + * @return: 0 on success, other value on error + */ + int (*query_jfs)(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_attr *attr); + /** + * flush jfs from ubep. + * @param[in] jfs: the jfs created before; + * @param[in] cr_cnt: the maximum number of CRs expected to be returned; + * @param[out] cr: the addr of returned CRs; + * @return: the number of CR returned, 0 means no completion record returned, -1 on error + */ + int (*flush_jfs)(struct ubcore_jfs *jfs, int cr_cnt, + struct ubcore_cr *cr); + /** + * destroy jfs from ubep. + * @param[in] jfs: the jfs created before; + * @return: 0 on success, other value on error + */ + int (*destroy_jfs)(struct ubcore_jfs *jfs); + /** + * batch destroy jfs from ubep. 
+ * @param[in] jfs_arr: the jfs array created before; + * @param[in] jfs_num: jfs array length; + * @param[out] bad_jfs_index: when error, return error jfs index in the array; + * @return: 0 on success, other value on error + */ + int (*destroy_jfs_batch)(struct ubcore_jfs **jfs_arr, int jfs_num, + int *bad_jfs_index); + /** + * create jfr with ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: jfr attributes and configurations + * @param[in] udata: ucontext and user space driver data + * @return: jfr pointer on success, NULL on error + */ + struct ubcore_jfr *(*create_jfr)(struct ubcore_device *dev, + struct ubcore_jfr_cfg *cfg, + struct ubcore_udata *udata); + /** + * modify jfr from ubep. + * @param[in] jfr: the jfr created before; + * @param[in] attr: ubcore jfr attr; + * @param[in] udata: ucontext and user space driver data + * @return: 0 on success, other value on error + */ + int (*modify_jfr)(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, + struct ubcore_udata *udata); + /** + * query jfr from ubep. + * @param[in] jfr: the jfr created before; + * @param[out] cfg: jfr configurations; + * @param[out] attr: ubcore jfr attributes; + * @return: 0 on success, other value on error + */ + int (*query_jfr)(struct ubcore_jfr *jfr, struct ubcore_jfr_cfg *cfg, + struct ubcore_jfr_attr *attr); + /** + * destroy jfr from ubep. + * @param[in] jfr: the jfr created before; + * @return: 0 on success, other value on error + */ + int (*destroy_jfr)(struct ubcore_jfr *jfr); + /** + * batch destroy jfr from ubep. + * @param[in] jfr_arr: the jfr array created before; + * @param[in] jfr_num: jfr array length; + * @param[out] bad_jfr_index: when error, return error jfr index in the array; + * @return: 0 on success, other value on error + */ + int (*destroy_jfr_batch)(struct ubcore_jfr **jfr_arr, int jfr_num, + int *bad_jfr_index); + /** + * import jfr to ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: remote jfr attributes and import configurations + * @param[in] udata: ucontext and user space driver data + * @return: target jfr pointer on success, NULL on error + */ + struct ubcore_tjetty *(*import_jfr)(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); + + /** + * import jfr to ubep by control plane. + * @param[in] dev: the ub device handle; + * @param[in] cfg: remote jfr attributes and import configurations; + * @param[in] active_tp_cfg: tp configuration to active; + * @param[in] udata: ucontext and user space driver data + * @return: target jfr pointer on success, NULL on error + */ + struct ubcore_tjetty *(*import_jfr_ex)( + struct ubcore_device *dev, struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata); + + /** + * unimport jfr from ubep. + * @param[in] tjfr: the target jfr imported before; + * @return: 0 on success, other value on error + */ + int (*unimport_jfr)(struct ubcore_tjetty *tjfr); + + /** + * create jetty with ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: jetty attributes and configurations + * @param[in] udata: ucontext and user space driver data + * @return: jetty pointer on success, NULL on error + */ + struct ubcore_jetty *(*create_jetty)(struct ubcore_device *dev, + struct ubcore_jetty_cfg *cfg, + struct ubcore_udata *udata); + /** + * modify jetty from ubep. 
+ * @param[in] jetty: the jetty created before; + * @param[in] attr: ubcore jetty attr; + * @param[in] udata: ucontext and user space driver data + * @return: 0 on success, other value on error + */ + int (*modify_jetty)(struct ubcore_jetty *jetty, + struct ubcore_jetty_attr *attr, + struct ubcore_udata *udata); + /** + * query jetty from ubep. + * @param[in] jetty: the jetty created before; + * @param[out] cfg: jetty configurations; + * @param[out] attr: ubcore jetty attributes; + * @return: 0 on success, other value on error + */ + int (*query_jetty)(struct ubcore_jetty *jetty, + struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_attr *attr); + /** + * flush jetty from ubep. + * @param[in] jetty: the jetty created before; + * @param[in] cr_cnt: the maximum number of CRs expected to be returned; + * @param[out] cr: the addr of returned CRs; + * @return: the number of CR returned, 0 means no completion record returned, -1 on error + */ + int (*flush_jetty)(struct ubcore_jetty *jetty, int cr_cnt, + struct ubcore_cr *cr); + /** + * destroy jetty from ubep. + * @param[in] jetty: the jetty created before; + * @return: 0 on success, other value on error + */ + int (*destroy_jetty)(struct ubcore_jetty *jetty); + /** + * batch destroy jetty from ubep. + * @param[in] jetty_arr: the jetty array created before; + * @param[in] jetty_num: jetty array length; + * @param[out] bad_jetty_index: when error, return error jetty index in the array; + * @return: 0 on success, other value on error + */ + int (*destroy_jetty_batch)(struct ubcore_jetty **jetty_arr, + int jetty_num, int *bad_jetty_index); + /** + * import jetty to ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: remote jetty attributes and import configurations + * @param[in] udata: ucontext and user space driver data + * @return: target jetty pointer on success, NULL on error + */ + struct ubcore_tjetty *(*import_jetty)(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); + + /** + * import jetty to ubep by control plane. + * @param[in] dev: the ub device handle; + * @param[in] cfg: remote jetty attributes and import configurations + * @param[in] active_tp_cfg: tp configuration to active + * @param[in] udata: ucontext and user space driver data + * @return: target jetty pointer on success, NULL on error + */ + struct ubcore_tjetty *(*import_jetty_ex)( + struct ubcore_device *dev, struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata); + + /** + * unimport jetty from ubep. + * @param[in] tjetty: the target jetty imported before; + * @return: 0 on success, other value on error + */ + int (*unimport_jetty)(struct ubcore_tjetty *tjetty); + /** + * bind jetty from ubep. + * @param[in] jetty: the jetty created before; + * @param[in] tjetty: the target jetty imported before; + * @param[in] udata: ucontext and user space driver data + * @return: 0 on success, other value on error + */ + int (*bind_jetty)(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata); + + /** + * bind jetty from ubep by control plane. 
+ * @param[in] jetty: the jetty created before; + * @param[in] tjetty: the target jetty imported before; + * @param[in] active_tp_cfg: tp configuration to active; + * @param[in] udata: ucontext and user space driver data + * @return: 0 on success, other value on error + */ + int (*bind_jetty_ex)(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata); + + /** + * unbind jetty from ubep. + * @param[in] jetty: the jetty binded before; + * @return: 0 on success, other value on error + */ + int (*unbind_jetty)(struct ubcore_jetty *jetty); + + /** + * create jetty group to ubep. + * @param[in] dev: the ub device handle; + * @param[in] cfg: pointer of the jetty group config; + * @param[in] udata: ucontext and user space driver data + * @return: jetty group pointer on success, NULL on error + */ + struct ubcore_jetty_group *(*create_jetty_grp)( + struct ubcore_device *dev, struct ubcore_jetty_grp_cfg *cfg, + struct ubcore_udata *udata); + /** + * destroy jetty group to ubep. + * @param[in] jetty_grp: the jetty group created before; + * @return: 0 on success, other value on error + */ + int (*delete_jetty_grp)(struct ubcore_jetty_group *jetty_grp); + + /** + * create tpg. + * @param[in] dev: the ub device handle; + * @param[in] cfg: tpg init attributes + * @param[in] udata: ucontext and user space driver data + * @return: tp pointer on success, NULL on error + */ + struct ubcore_tpg *(*create_tpg)(struct ubcore_device *dev, + struct ubcore_tpg_cfg *cfg, + struct ubcore_udata *udata); + /** + * destroy tpg. + * @param[in] tp: tp pointer created before + * @return: 0 on success, other value on error + */ + int (*destroy_tpg)(struct ubcore_tpg *tpg); + + /** + * get tpid list by control plane. + * @param[in] dev: ubcore device pointer created before + * @param[in] cfg: tpid configuration to be matched + * @param[in && out] tp_cnt: tp_cnt is the length of tp_list buffer as in parameter; + * tp_cnt is the number of tp as out parameter + * @param[out] tp_list: tp list to get, the buffer is allocated by user; + * @param[in && out] udata: ucontext and user space driver data + * @return: 0 on success, other value on error + */ + int (*get_tp_list)(struct ubcore_device *dev, + struct ubcore_get_tp_cfg *cfg, uint32_t *tp_cnt, + struct ubcore_tp_info *tp_list, + struct ubcore_udata *udata); + + /** + * set tp attributions by control plane. + * @param[in] dev: ubcore device pointer created before; + * @param[in] tp_handle: tp_handle got by ubcore_get_tp_list; + * @param[in] tp_attr_cnt: number of tp attributions; + * @param[in] tp_attr_bitmap: tp attributions bitmap, current bitmap is as follow: + * 0-retry_times_init: 3 bit 1-at: 5 bit 2-SIP: 128 bit + * 3-DIP: 128 bit 4-SMA: 48 bit 5-DMA: 48 bit + * 6-vlan_id: 12 bit 7-vlan_en: 1 bit 8-dscp: 6 bit + * 9-at_times: 5 bit 10-sl: 4 bit 11-tti: 8 bit + * @param[in] tp_attr: tp attribution values to set; + * @param[in && out] udata: ucontext and user space driver data; + * @return: 0 on success, other value on error + */ + int (*set_tp_attr)(struct ubcore_device *dev, const uint64_t tp_handle, + const uint8_t tp_attr_cnt, + const uint32_t tp_attr_bitmap, + const struct ubcore_tp_attr_value *tp_attr, + struct ubcore_udata *udata); + + /** + * get tp attributions by control plane. 
+ * @param[in] dev: ubcore device pointer created before; + * @param[in] tp_handle: tp_handle got by ubcore_get_tp_list; + * @param[out] tp_attr_cnt: number of tp attributions; + * @param[out] tp_attr_bitmap: tp bitmap, the same as tp_attr_bitmap in set_tp_attr; + * @param[out] tp_attr: tp attribution values to get; + * @param[in && out] udata: ucontext and user space driver data; + * @return: 0 on success, other value on error + */ + int (*get_tp_attr)(struct ubcore_device *dev, const uint64_t tp_handle, + uint8_t *tp_attr_cnt, uint32_t *tp_attr_bitmap, + struct ubcore_tp_attr_value *tp_attr, + struct ubcore_udata *udata); + + /** + * active tp by control plane. + * @param[in] dev: ubcore device pointer created before + * @param[in] active_cfg: tp configuration to active + * @return: 0 on success, other value on error + */ + int (*active_tp)(struct ubcore_device *dev, + struct ubcore_active_tp_cfg *active_cfg); + + /** + * deactivate tp by control plane. + * @param[in] dev: ubcore device pointer created before + * @param[in] tp_handle: tp_handle value got before + * @param[in] udata: [Optional] udata should be NULL when called + * by kernel application and be valid when called + * by user space application + * @return: 0 on success, other value on error + */ + int (*deactive_tp)(struct ubcore_device *dev, + union ubcore_tp_handle tp_handle, + struct ubcore_udata *udata); + + /** + * create tp. + * @param[in] dev: the ub device handle; + * @param[in] cfg: tp init attributes + * @param[in] udata: ucontext and user space driver data + * @return: tp pointer on success, NULL on error + */ + struct ubcore_tp *(*create_tp)(struct ubcore_device *dev, + struct ubcore_tp_cfg *cfg, + struct ubcore_udata *udata); + /** + * modify tp. + * @param[in] tp: tp pointer created before + * @param[in] attr: tp attributes + * @param[in] mask: attr mask indicating the attributes to be modified + * @return: 0 on success, other value on error + */ + int (*modify_tp)(struct ubcore_tp *tp, struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask mask); + /** + * modify user tp. + * @param[in] dev: the ub device handle + * @param[in] tpn: tp number of the tp created before + * @param[in] cfg: user configuration of the tp + * @param[in] attr: tp attributes + * @param[in] mask: attr mask indicating the attributes to be modified + * @return: 0 on success, other value on error + */ + int (*modify_user_tp)(struct ubcore_device *dev, uint32_t tpn, + struct ubcore_tp_cfg *cfg, + struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask mask); + /** + * destroy tp. + * @param[in] tp: tp pointer created before + * @return: 0 on success, other value on error + */ + int (*destroy_tp)(struct ubcore_tp *tp); + + /** + * create multi tp. + * @param[in] dev: the ub device handle; + * @param[in] cnt: the number of tp, must be less than or equal to 32; + * @param[in] cfg: array of tp init attributes + * @param[in] udata: array of ucontext and user space driver data + * @param[out] tp: pointer array of tp + * @return: created tp cnt, 0 on error + */ + int (*create_multi_tp)(struct ubcore_device *dev, uint32_t cnt, + struct ubcore_tp_cfg *cfg, + struct ubcore_udata *udata, + struct ubcore_tp **tp); + /** + * modify multi tp. 
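One reading of the tp_attr_bitmap layout above is sketched below: it selects attribute 8 (dscp) and attribute 10 (sl). The assumption that bit i marks attribute i as present, the ordering of the value array, and passing a NULL udata from a kernel caller are all interpretations, and the contents of struct ubcore_tp_attr_value are defined elsewhere in ubcore_types.h.

static int foo_set_dscp_sl(struct ubcore_device *dev, uint64_t tp_handle)
{
	/* bit i set => attribute i present (assumed); 8 = dscp, 10 = sl */
	uint32_t bitmap = (1U << 8) | (1U << 10);
	/* vals[0] carries dscp, vals[1] carries sl (assumed ordering) */
	struct ubcore_tp_attr_value vals[2] = {};

	if (!dev->ops || !dev->ops->set_tp_attr)
		return -EOPNOTSUPP;
	return dev->ops->set_tp_attr(dev, tp_handle, 2, bitmap, vals,
				     NULL /* kernel caller, no udata */);
}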
+ * @param[in] cnt: the number of tp; + * @param[in] tp: pointer array of tp created before + * @param[in] attr: array of tp attributes + * @param[in] mask: array of attr mask indicating the attributes to be modified + * @param[in] fail_tp: pointer of tp failed to modify + * @return: modified successfully tp cnt, 0 on error + */ + int (*modify_multi_tp)(uint32_t cnt, struct ubcore_tp **tp, + struct ubcore_tp_attr *attr, + union ubcore_tp_attr_mask *mask, + struct ubcore_tp **fail_tp); + /** + * destroy multi tp. + * @param[in] cnt: the number of tp; + * @param[in] tp: pointer array of tp created before + * @return: destroyed tp cnt, 0 on error + */ + int (*destroy_multi_tp)(uint32_t cnt, struct ubcore_tp **tp); + + /** + * allocate vtp. + * @param[in] dev: the ub device handle; + * @return: vtpn pointer on success, NULL on error + */ + struct ubcore_vtpn *(*alloc_vtpn)(struct ubcore_device *dev); + + /** + * free vtpn. + * @param[in] vtpn: vtpn pointer allocated before + * @return: 0 on success, other value on error + */ + int (*free_vtpn)(struct ubcore_vtpn *vtpn); + + /** + * create vtp. + * @param[in] dev: the ub device handle; + * @param[in] cfg: vtp init attributes + * @param[in] udata: ucontext and user space driver data + * @return: vtp pointer on success, NULL on error + */ + struct ubcore_vtp *(*create_vtp)(struct ubcore_device *dev, + struct ubcore_vtp_cfg *cfg, + struct ubcore_udata *udata); + /** + * destroy vtp. + * @param[in] vtp: vtp pointer created before + * @return: 0 on success, other value on error + */ + int (*destroy_vtp)(struct ubcore_vtp *vtp); + + /** + * create utp. + * @param[in] dev: the ub device handle; + * @param[in] cfg: utp init attributes + * @param[in] udata: ucontext and user space driver data + * @return: utp pointer on success, NULL on error + */ + struct ubcore_utp *(*create_utp)(struct ubcore_device *dev, + struct ubcore_utp_cfg *cfg, + struct ubcore_udata *udata); + /** + * destroy utp. + * @param[in] utp: utp pointer created before + * @return: 0 on success, other value on error + */ + int (*destroy_utp)(struct ubcore_utp *utp); + + /** + * create ctp. + * @param[in] dev: the ub device handle; + * @param[in] cfg: ctp init attributes + * @param[in] udata: ucontext and user space driver data + * @return: ctp pointer on success, NULL on error + */ + struct ubcore_ctp *(*create_ctp)(struct ubcore_device *dev, + struct ubcore_ctp_cfg *cfg, + struct ubcore_udata *udata); + /** + * destroy ctp. + * @param[in] ctp: ctp pointer created before + * @return: 0 on success, other value on error + */ + int (*destroy_ctp)(struct ubcore_ctp *ctp); + + /** + * UE send msg to MUE device. + * @param[in] dev: UE or MUE device; + * @param[in] msg: msg to send; + * @return: 0 on success, other value on error + */ + int (*send_req)(struct ubcore_device *dev, struct ubcore_req *msg); + + /** + * MUE send msg to UE device. 
+ * @param[in] dev: MUE device; + * @param[in] msg: msg to send; + * @return: 0 on success, other value on error + */ + int (*send_resp)(struct ubcore_device *dev, + struct ubcore_resp_host *msg); + + /** + * query cc table to get cc pattern idx + * @param[in] dev: the ub device handle; + * @param[in] cc_entry_cnt: cc entry cnt; + * @return: return NULL on fail, otherwise, return cc entry array + */ + struct ubcore_cc_entry *(*query_cc)(struct ubcore_device *dev, + uint32_t *cc_entry_cnt); + + /** + * bond slave net device + * @param[in] bond: bond netdev; + * @param[in] slave: slave netdev; + * @param[in] upper_info: change upper event info; + * @return: 0 on success, other value on error + */ + int (*bond_add)(struct net_device *bond, struct net_device *slave, + struct netdev_lag_upper_info *upper_info); + + /** + * unbond slave net device + * @param[in] bond: bond netdev; + * @param[in] slave: slave netdev; + * @return: 0 on success, other value on error + */ + int (*bond_remove)(struct net_device *bond, struct net_device *slave); + + /** + * update slave net device + * @param[in] bond: bond netdev; + * @param[in] slave: slave netdev; + * @param[in] lower_info: change lower state event info; + * @return: 0 on success, other value on error + */ + int (*slave_update)(struct net_device *bond, struct net_device *slave, + struct netdev_lag_lower_state_info *lower_info); + + /** + * operation of user ioctl cmd. + * @param[in] dev: the ub device handle; + * @param[in] user_ctl: kdrv user control command pointer; + * Return: 0 on success, other value on error + */ + int (*user_ctl)(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl); + + /** data path ops */ + /** + * post jfs wr. + * @param[in] jfs: the jfs created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ + int (*post_jfs_wr)(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr); + /** + * post jfr wr. + * @param[in] jfr: the jfr created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ + int (*post_jfr_wr)(struct ubcore_jfr *jfr, struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr); + /** + * post jetty send wr. + * @param[in] jetty: the jetty created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ + int (*post_jetty_send_wr)(struct ubcore_jetty *jetty, + struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr); + /** + * post jetty receive wr. + * @param[in] jetty: the jetty created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ + int (*post_jetty_recv_wr)(struct ubcore_jetty *jetty, + struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr); + /** + * poll jfc. + * @param[in] jfc: the jfc created before; + * @param[in] cr_cnt: the maximum number of CRs expected to be polled; + * @return: 0 on success, other value on error + */ + int (*poll_jfc)(struct ubcore_jfc *jfc, int cr_cnt, + struct ubcore_cr *cr); + + /** + * query_stats. 
success to query and buffer length is enough + * @param[in] dev: the ub device handle; + * @param[in] key: type and key value of the ub device to query; + * @param[in/out] val: address and buffer length of query results + * @return: 0 on success, other value on error + */ + int (*query_stats)(struct ubcore_device *dev, + struct ubcore_stats_key *key, + struct ubcore_stats_val *val); + /** + * config function migrate state. + * @param[in] dev: the ub device handle; + * @param[in] ue_idx: ue id; + * @param[in] cnt: config count; + * @param[in] cfg: eid and the upi of ue to which the eid belongs can be specified; + * @param[in] state: config state (start, rollback and finish) + * @return: config success count, -1 on error + */ + int (*config_function_migrate_state)(struct ubcore_device *dev, + uint16_t ue_idx, uint32_t cnt, + struct ubcore_ueid_cfg *cfg, + enum ubcore_mig_state state); + /** + * modify vtp. + * @param[in] vtp: vtp pointer to be modified; + * @param[in] attr: vtp attr, tp that we want to change; + * @param[in] mask: attr mask; + * @return: 0 on success, other value on error + */ + int (*modify_vtp)(struct ubcore_vtp *vtp, struct ubcore_vtp_attr *attr, + union ubcore_vtp_attr_mask *mask); + /** + * query ue index. + * @param[in] dev: the ub device handle; + * @param[in] devid: ue devid to query + * @param[out] ue_idx: ue id; + * @return: 0 on success, other value on error + */ + int (*query_ue_idx)(struct ubcore_device *dev, + struct ubcore_devid *devid, uint16_t *ue_idx); + /** + * config dscp-vl mapping + * @param[in] dev:the ub dev handle; + * @param[in] dscp: the dscp value array + * @param[in] vl: the vl value array + * @param[in] num: array num + * @return: 0 on success, other value on error + */ + int (*config_dscp_vl)(struct ubcore_device *dev, uint8_t *dscp, + uint8_t *vl, uint8_t num); + /** + * query ue stats, for migration currently. + * @param[in] dev: the ub device handle; + * @param[in] cnt: array count; + * @param[in] ue_idx: ue id array; + * @param[out] stats: ue counters + * @return: 0 on success, other value on error + */ + int (*query_ue_stats)(struct ubcore_device *dev, uint32_t cnt, + uint16_t *ue_idx, struct ubcore_ue_stats *stats); + + /** + * query dscp-vl mapping + * @param[in] dev:the ub dev handle; + * @param[in] dscp: the dscp value array + * @param[in] num: array num + * @param[out] vl: the vl value array + * @return: 0 on success, other value on error + */ + int (*query_dscp_vl)(struct ubcore_device *dev, uint8_t *dscp, + uint8_t num, uint8_t *vl); + + /** + * When UVS or UB dataplane is running: + * 1. disassociate_ucontext != NULL means support rmmod driver. + * 2. disassociate_ucontext == NULL means rmmod driver will fail because module is in use. + * If disassociate_ucontext != NULL: + * 1. When remove MUE/UE device, will call it; + * 2. When remove MUE device, will not call it because there are no uctx. 
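A small sketch of the dscp-to-vl query documented above, treating the op as optional (it may be left NULL by a driver); the DSCP values and the foo_* name are illustrative only.

static int foo_dump_dscp_vl(struct ubcore_device *dev)
{
	uint8_t dscp[4] = { 0, 8, 16, 24 };	/* illustrative DSCP values */
	uint8_t vl[4];
	int ret;

	if (!dev->ops || !dev->ops->query_dscp_vl)
		return -EOPNOTSUPP;
	ret = dev->ops->query_dscp_vl(dev, dscp, 4, vl);
	if (ret == 0)
		pr_info("dscp 0 maps to vl %u\n", vl[0]);	/* vl[i] pairs with dscp[i] */
	return ret;
}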
+ * @param[in] uctx: the ubcore_ucontext + */ + void (*disassociate_ucontext)(struct ubcore_ucontext *uctx); +}; + +struct ubcore_bitmap { + unsigned long *bits; + uint32_t size; + spinlock_t lock; +}; + +enum ubcore_hash_table_type { + UBCORE_HT_JFS = 0, /* jfs hash table */ + UBCORE_HT_JFR, /* jfr hash table */ + UBCORE_HT_JFC, /* jfc hash table */ + UBCORE_HT_JETTY, /* jetty hash table */ + UBCORE_HT_TP, /* tp table */ + UBCORE_HT_TPG, /* tpg table */ + UBCORE_HT_RM_VTP, /* rm vtp table */ + UBCORE_HT_RC_VTP, /* rc vtp table */ + UBCORE_HT_UM_VTP, /* um vtp table */ + UBCORE_HT_RM_VTPN, /* rm vtpn table */ + UBCORE_HT_RC_VTPN, /* rc vtpn table */ + UBCORE_HT_UM_VTPN, /* um vtpn table */ + UBCORE_HT_CP_VTPN, /* vtpn table for control plane */ + UBCORE_HT_UTP, /* utp table */ + UBCORE_HT_VTPN, /* vtpn table */ + UBCORE_HT_CTP, /* ctp table */ + UBCORE_HT_EX_TP, /* exchange tp info for control plane */ + UBCORE_HT_RM_CTP_ID, /* key: seid + deid + tag */ + UBCORE_HT_RC_CTP_ID, /* seid + deid + sjettyid + djettyid + tag */ + UBCORE_HT_RM_TP_ID, /* key: seid + deid + tag */ + UBCORE_HT_RC_TP_ID, /* seid + deid + sjettyid + djettyid + tag */ + UBCORE_HT_UTP_ID, /* key: seid + deid + tag */ + UBCORE_HT_NUM +}; + +struct ubcore_eid_entry { + union ubcore_eid eid; + uint32_t eid_index; + struct net *net; + bool valid; +}; + +struct ubcore_eid_table { + uint32_t eid_cnt; + struct ubcore_eid_entry *eid_entries; + spinlock_t lock; +}; + +struct ubcore_sip_info { + char dev_name[UBCORE_MAX_DEV_NAME]; + struct ubcore_net_addr addr; + uint32_t prefix_len; + uint8_t port_cnt; + uint8_t port_id[UBCORE_MAX_PORT_CNT]; + uint32_t mtu; + char netdev_name[UBCORE_MAX_DEV_NAME]; /* for change mtu */ + bool is_active; +}; + +struct ubcore_sip_entry { + struct ubcore_sip_info sip_info; + atomic_t uvs_cnt; + uint64_t reserve; +}; + +struct ubcore_sip_table { + struct mutex lock; + uint32_t max_sip_cnt; + struct ubcore_sip_entry *entry; + DECLARE_BITMAP(index_bitmap, UBCORE_MAX_SIP); +}; + +struct ubcore_port_kobj { + struct kobject kobj; + struct ubcore_device *dev; + uint8_t port_id; +}; + +struct ubcore_eid_attr { + char name[UBCORE_EID_GROUP_NAME_LEN]; + uint32_t eid_idx; + struct device_attribute attr; +}; + +struct ubcore_logic_device { + struct device *dev; + struct ubcore_port_kobj port[UBCORE_MAX_PORT_CNT]; + struct list_head node; /* add to ldev list */ + possible_net_t net; + struct ubcore_device *ub_dev; + const struct attribute_group *dev_group[UBCORE_ATTR_GROUP_MAX]; +}; + +struct ubcore_vtp_bitmap { + struct mutex lock; + uint32_t max_vtp_cnt; + uint64_t *bitmap; +}; + +struct ubcore_device { + struct list_head list_node; /* add to device list */ + + /* driver fills start */ + char dev_name[UBCORE_MAX_DEV_NAME]; + + struct device *dma_dev; + struct device dev; + struct net_device *netdev; + struct ubcore_ops *ops; + enum ubcore_transport_type transport_type; + struct ubcore_device_attr attr; + struct attribute_group + *group[UBCORE_MAX_ATTR_GROUP]; /* driver may fill group [1] */ + /* driver fills end */ + + struct ubcore_device_cfg cfg; + + /* port management */ + struct list_head port_list; + + /* For ubcore client */ + struct rw_semaphore client_ctx_rwsem; + struct list_head client_ctx_list; + struct list_head event_handler_list; + struct rw_semaphore event_handler_rwsem; + struct ubcore_hash_table ht[UBCORE_HT_NUM]; /* to be replaced with uobj */ + + /* protect from unregister device */ + atomic_t use_cnt; + struct completion comp; + bool dynamic_eid; /* Assign eid dynamically with netdev 
notifier */ + struct ubcore_eid_table eid_table; + struct ubcore_cg_device cg_device; + struct ubcore_sip_table sip_table; + + /* logic device list and mutex */ + struct ubcore_logic_device ldev; + struct mutex ldev_mutex; + struct list_head ldev_list; + + /* ue_idx to uvs_instance mapping */ + void **ue2uvs_table; + struct rw_semaphore ue2uvs_rwsem; + + /* for vtp audit */ + struct ubcore_vtp_bitmap vtp_bitmap; +}; + +struct ubcore_client { + struct list_head list_node; + char *client_name; + int (*add)(struct ubcore_device *dev); + void (*remove)(struct ubcore_device *dev, void *client_ctx); + /* The driver needs to stay and resolve the memory mapping first, */ + /* and then release the jetty resources. */ + void (*stop)(struct ubcore_device *dev, void *client_ctx); +}; + +struct ubcore_client_ctx { + struct list_head list_node; + void *data; // Each ubep device create some data on the client, such as uburma_device. + struct ubcore_client *client; +}; + +union ubcore_umem_flag { + struct { + uint32_t non_pin : 1; /* 0: pinned to physical memory. 1: non pin. */ + uint32_t writable : 1; /* 0: read-only. 1: writable. */ + uint32_t reserved : 30; + } bs; + uint32_t value; +}; + +struct ubcore_umem { + struct ubcore_device *ub_dev; + struct mm_struct *owning_mm; + uint64_t length; + uint64_t va; + union ubcore_umem_flag flag; + struct sg_table sg_head; + uint32_t nmap; +}; + +enum ubcore_net_addr_op { + UBCORE_ADD_NET_ADDR = 0, + UBCORE_DEL_NET_ADDR = 1, + UBCORE_UPDATE_NET_ADDR = 2 +}; + +struct ubcore_mgmt_event { + struct ubcore_device *ub_dev; + union { + struct ubcore_eid_info *eid_info; + } element; + enum ubcore_mgmt_event_type event_type; +}; + +#endif diff --git a/include/ub/urma/ubcore_uapi.h b/include/ub/urma/ubcore_uapi.h new file mode 100644 index 000000000000..8c7d082aa5c0 --- /dev/null +++ b/include/ub/urma/ubcore_uapi.h @@ -0,0 +1,821 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Description: ubcore api for other client kmod, such as uburma. + * Author: Qian Guoxin, Ouyang Changchun + * Create: 2021-8-3 + * Note: + * History: 2021-8-3: Create file + * History: 2021-11-25: add segment and jetty management function + * History: 2022-7-25: modify file name + */ + +#ifndef UBCORE_UAPI_H +#define UBCORE_UAPI_H + +#include "ubcore_types.h" +/** + * Application specifies the device to allocate an context. + * @param[in] dev: ubcore_device found by add ops in the client. + * @param[in] eid_index: function entity id (eid) index to set; + * @param[in] udrv_data (optional): ucontext and user space driver data + * @return: ubcore_ucontext pointer on success, NULL on fail. + * Note: this API is called only by uburma representing user-space application, + * not by other kernel modules + */ +struct ubcore_ucontext * +ubcore_alloc_ucontext(struct ubcore_device *dev, uint32_t eid_index, + struct ubcore_udrv_priv *udrv_data); +/** + * Free the allocated context. + * @param[in] dev: device to free context. 
+ * @param[in] ucontext: handle of the allocated context. + * Note: this API is called only by uburma representing user-space application, + * not by other kernel modules + */ +void ubcore_free_ucontext(struct ubcore_device *dev, + struct ubcore_ucontext *ucontext); + +/** + * add a function entity id (eid) to ub device (for uvs) + * @param[in] dev: the ubcore_device handle; + * @param[in] ue_idx: ue_idx; + * @param[in] cfg: eid and the upi of ue to which the eid belongs can be specified; + * @return: the index of eid/upi, less than 0 indicating error + */ +int ubcore_add_ueid(struct ubcore_device *dev, uint16_t ue_idx, + struct ubcore_ueid_cfg *cfg); +/** + * remove a function entity id (eid) specified by idx from ub device (for uvs) + * @param[in] dev: the ubcore_device handle; + * @param[in] ue_idx: ue_idx; + * @param[in] cfg: eid and the upi of ue to which the eid belongs can be specified; + * @return: 0 on success, other value on error + */ +int ubcore_delete_ueid(struct ubcore_device *dev, uint16_t ue_idx, + struct ubcore_ueid_cfg *cfg); +/** + * query device attributes + * @param[in] dev: the ubcore_device handle; + * @param[out] attr: attributes returned to client + * @return: 0 on success, other value on error + */ +int ubcore_query_device_attr(struct ubcore_device *dev, + struct ubcore_device_attr *attr); +/** + * query device status + * @param[in] dev: the ubcore_device handle; + * @param[out] status: status returned to client + * @return: 0 on success, other value on error + */ +int ubcore_query_device_status(struct ubcore_device *dev, + struct ubcore_device_status *status); +/** + * query stats + * @param[in] dev: the ubcore_device handle; + * @param[in] key: stats type and key; + * @param[in/out] val: addr and len of value + * @return: 0 on success, other value on error + */ +int ubcore_query_stats(struct ubcore_device *dev, struct ubcore_stats_key *key, + struct ubcore_stats_val *val); +/** + * query resource + * @param[in] dev: the ubcore_device handle; + * @param[in] key: resource type and key; + * @param[in/out] val: addr and len of value + * @return: 0 on success, other value on error + */ +int ubcore_query_resource(struct ubcore_device *dev, struct ubcore_res_key *key, + struct ubcore_res_val *val); +/** + * config device + * @param[in] dev: the ubcore_device handle; + * @param[in] cfg: device configuration + * @return: 0 on success, other value on error + */ +int ubcore_config_device(struct ubcore_device *dev, + struct ubcore_device_cfg *cfg); + +/** + * set ctx data of a client + * @param[in] dev: the ubcore_device handle; + * @param[in] client: ubcore client pointer + * @param[in] data (optional): client private data to be set + * @return: 0 on success, other value on error + */ +void ubcore_set_client_ctx_data(struct ubcore_device *dev, + struct ubcore_client *client, void *data); +/** + * get ctx data of a client + * @param[in] dev: the ubcore_device handle; + * @param[in] client: ubcore client pointer + * @return: client private data set before + */ +void *ubcore_get_client_ctx_data(struct ubcore_device *dev, + struct ubcore_client *client); +/** + * Register a new client to ubcore + * @param[in] dev: the ubcore_device handle; + * @param[in] new_client: ubcore client to be registered + * @return: 0 on success, other value on error + */ +int ubcore_register_client(struct ubcore_client *new_client); +/** + * Unregister a client from ubcore + * @param[in] rm_client: ubcore client to be unregistered + */ +void ubcore_unregister_client(struct ubcore_client *rm_client); 
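Putting the client API above together, a minimal sketch of a kernel client follows. The foo_* names are assumptions, error paths are trimmed, and the optional .stop hook is omitted; ubcore calls .add for every registered device and .remove on teardown.

#include <linux/module.h>
#include <linux/slab.h>
#include <ub/urma/ubcore_uapi.h>	/* include path assumed from this patch */

static struct ubcore_client foo_client;

/* called by ubcore for every registered device */
static int foo_add_device(struct ubcore_device *dev)
{
	void *ctx = kzalloc(64, GFP_KERNEL);	/* client-private per-device state */

	if (!ctx)
		return -ENOMEM;
	ubcore_set_client_ctx_data(dev, &foo_client, ctx);
	return 0;
}

/* called by ubcore when the device goes away */
static void foo_remove_device(struct ubcore_device *dev, void *client_ctx)
{
	kfree(client_ctx);
}

static char foo_name[] = "foo_client";
static struct ubcore_client foo_client = {
	.client_name = foo_name,
	.add = foo_add_device,
	.remove = foo_remove_device,
};

static int __init foo_init(void)
{
	return ubcore_register_client(&foo_client);
}

static void __exit foo_exit(void)
{
	ubcore_unregister_client(&foo_client);
}

module_init(foo_init);
module_exit(foo_exit);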
+/** + * alloc token to ubcore device + * @param[in] dev: the ubcore device handle; + * @param[in] flag: token_id_flag; + * @param[in] udata (optional): ucontext and user space driver data + * @return: token id pointer on success, NULL on error + */ +struct ubcore_token_id *ubcore_alloc_token_id(struct ubcore_device *dev, + union ubcore_token_id_flag flag, + struct ubcore_udata *udata); +/** + * free token id from ubcore device + * @param[in] token_id: the token_id id alloced before; + * @return: 0 on success, other value on error + */ +int ubcore_free_token_id(struct ubcore_token_id *token_id); + +/** + * register segment to ubcore device + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: segment configurations + * @param[in] udata (optional): ucontext and user space driver data + * @return: target segment pointer on success, NULL on error + */ +struct ubcore_target_seg *ubcore_register_seg(struct ubcore_device *dev, + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata); +/** + * unregister segment from ubcore device + * @param[in] tseg: the segment registered before; + * @return: 0 on success, other value on error + */ +int ubcore_unregister_seg(struct ubcore_target_seg *tseg); +/** + * import a remote segment to ubcore device + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: import configurations + * @param[in] udata (optional): ucontext and user space driver data + * @return: target segment handle on success, NULL on error + */ +struct ubcore_target_seg *ubcore_import_seg(struct ubcore_device *dev, + struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata); +/** + * unimport seg from ubcore device + * @param[in] tseg: the segment imported before; + * @return: 0 on success, other value on error + */ +int ubcore_unimport_seg(struct ubcore_target_seg *tseg); +/** + * create jfc with ubcore device. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: jfc attributes and configurations + * @param[in] jfce_handler (optional): completion event handler + * @param[in] jfae_handler (optional): jfc async_event handler + * @param[in] udata (optional): ucontext and user space driver data + * @return: jfc pointer on success, NULL on error + */ +struct ubcore_jfc *ubcore_create_jfc(struct ubcore_device *dev, + struct ubcore_jfc_cfg *cfg, + ubcore_comp_callback_t jfce_handler, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata); +/** + * modify jfc from ubcore device. + * @param[in] jfc: the jfc created before; + * @param[in] attr: ubcore jfc attributes; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + */ +int ubcore_modify_jfc(struct ubcore_jfc *jfc, struct ubcore_jfc_attr *attr, + struct ubcore_udata *udata); +/** + * destroy jfc from ubcore device. + * @param[in] jfc: the jfc created before; + * @return: 0 on success, other value on error + */ +int ubcore_delete_jfc(struct ubcore_jfc *jfc); +/** + * batch destroy jfc from ubcore device. + * @param[in] jfc_arr: the jfc array created before; + * @param[in] jfc_num: jfc array length; + * @param[out] bad_jfc_index: when error, return error jfc index in the array; + * @return: 0 on success, EINVAL on invalid parameter, other value on other batch + * delete errors. + * If delete error happens(except invalid parameter), stop at the first failed + * jfc and return, these jfc before the failed jfc will be deleted normally. 
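A sketch of the segment path declared above: allocate a token id, register a local segment, and tear both down in reverse order. The zero-initialized cfg is a placeholder; the real fields (VA, length, token, flags) live in ubcore_types.h, and foo_expose_segment is a hypothetical caller.

static int foo_expose_segment(struct ubcore_device *dev)
{
	union ubcore_token_id_flag flag = {};
	struct ubcore_seg_cfg cfg = {};	/* would carry VA, length, token, ... */
	struct ubcore_token_id *tid;
	struct ubcore_target_seg *tseg;

	tid = ubcore_alloc_token_id(dev, flag, NULL);
	if (!tid)
		return -ENOMEM;

	tseg = ubcore_register_seg(dev, &cfg, NULL);
	if (!tseg) {
		ubcore_free_token_id(tid);
		return -EIO;
	}

	/* ... hand the segment's token to the remote side, do I/O ... */

	ubcore_unregister_seg(tseg);
	ubcore_free_token_id(tid);
	return 0;
}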
+ */ +int ubcore_delete_jfc_batch(struct ubcore_jfc **jfc_arr, int jfc_num, + int *bad_jfc_index); +/** + * rearm jfc. + * @param[in] jfc: the jfc created before; + * @param[in] solicited_only: rearm notify by message marked with solicited flag + * @return: 0 on success, other value on error + */ +int ubcore_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only); +/** + * create jfs with ubcore device. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: jfs configurations + * @param[in] jfae_handler (optional): jfs async_event handler + * @param[in] udata (optional): ucontext and user space driver data + * @return: jfs pointer on success, NULL on error + */ +struct ubcore_jfs *ubcore_create_jfs(struct ubcore_device *dev, + struct ubcore_jfs_cfg *cfg, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata); +/** + * modify jfs from ubcore device. + * @param[in] jfs: the jfs created before; + * @param[in] attr: ubcore jfs attributes; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + */ +int ubcore_modify_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_attr *attr, + struct ubcore_udata *udata); +/** + * query jfs from ubcore device. + * @param[in] jfs: the jfs created before; + * @param[out] cfg: jfs configurations; + * @param[out] attr: ubcore jfs attributes; + * @return: 0 on success, other value on error + */ +int ubcore_query_jfs(struct ubcore_jfs *jfs, struct ubcore_jfs_cfg *cfg, + struct ubcore_jfs_attr *attr); +/** + * destroy jfs from ubcore device. + * @param[in] jfs: the jfs created before; + * @return: 0 on success, other value on error + */ +int ubcore_delete_jfs(struct ubcore_jfs *jfs); +/** + * batch destroy jfs from ubcore device. + * @param[in] jfs_arr: the jfs array created before; + * @param[in] jfs_num: jfs array length; + * @param[out] bad_jfs_index: when error, return error jfs index in the array; + * @return: 0 on success, EINVAL on invalid parameter, other value on other batch + * delete errors. + * If delete error happens(except invalid parameter), stop at the first failed + * jfs and return, these jfs before the failed jfs will be deleted normally. + */ +int ubcore_delete_jfs_batch(struct ubcore_jfs **jfs_arr, int jfs_num, + int *bad_jfs_index); +/** + * return the wrs in JFS that is not consumed to the application through cr. + * @param[in] jfs: the jfs created before; + * @param[in] cr_cnt: the maximum number of CRs expected to be returned; + * @param[out] cr: the addr of returned CRs; + * @return: the number of completion record returned, 0 means no completion record returned, + * -1 on error + */ +int ubcore_flush_jfs(struct ubcore_jfs *jfs, int cr_cnt, struct ubcore_cr *cr); +/** + * create jfr with ubcore device. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: jfr configurations + * @param[in] jfae_handler (optional): jfr async_event handler + * @param[in] udata (optional): ucontext and user space driver data + * @return: jfr pointer on success, NULL on error + */ +struct ubcore_jfr *ubcore_create_jfr(struct ubcore_device *dev, + struct ubcore_jfr_cfg *cfg, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata); +/** + * modify jfr from ubcore device. 
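The batch-delete semantics above (stop at the first failure, earlier entries already deleted) suggest the following sketch; what to do with the surviving entries is the caller's policy, and the foo_* name is illustrative.

static void foo_delete_jfcs(struct ubcore_jfc **jfc_arr, int jfc_num)
{
	int bad_idx = -1;
	int ret;

	ret = ubcore_delete_jfc_batch(jfc_arr, jfc_num, &bad_idx);
	if (ret)
		/* entries [0, bad_idx) are gone; [bad_idx, jfc_num) still exist */
		pr_err("jfc batch delete stopped at %d (ret=%d)\n", bad_idx, ret);
}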
+ * @param[in] jfr: the jfr created before; + * @param[in] attr: ubcore jfr attr; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + */ +int ubcore_modify_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_attr *attr, + struct ubcore_udata *udata); +/** + * query jfr from ubcore device. + * @param[in] jfr: the jfr created before; + * @param[out] cfg: jfr configurations; + * @param[out] attr: ubcore jfr attributes; + * @return: 0 on success, other value on error + */ +int ubcore_query_jfr(struct ubcore_jfr *jfr, struct ubcore_jfr_cfg *cfg, + struct ubcore_jfr_attr *attr); +/** + * destroy jfr from ubcore device. + * @param[in] jfr: the jfr created before; + * @return: 0 on success, other value on error + */ +int ubcore_delete_jfr(struct ubcore_jfr *jfr); +/** + * batch destroy jfr from ubcore device. + * @param[in] jfr_arr: the jfr array created before; + * @param[in] jfr_num: jfr array length; + * @param[out] bad_jfr_index: when error, return error jfr index in the array; + * @return: 0 on success, EINVAL on invalid parameter, other value on other batch + * delete errors. + * If delete error happens(except invalid parameter), stop at the first failed + * jfr and return, these jfr before the failed jfr will be deleted normally. + */ +int ubcore_delete_jfr_batch(struct ubcore_jfr **jfr_arr, int jfr_num, + int *bad_jfr_index); +/** + * create jetty with ubcore device. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: jetty attributes and configurations + * @param[in] jfae_handler (optional): jetty async_event handler + * @param[in] udata (optional): ucontext and user space driver data + * @return: jetty pointer on success, NULL on error + */ +struct ubcore_jetty *ubcore_create_jetty(struct ubcore_device *dev, + struct ubcore_jetty_cfg *cfg, + ubcore_event_callback_t jfae_handler, + struct ubcore_udata *udata); +/** + * modify jetty attributes. + * @param[in] jetty: the jetty created before; + * @param[in] attr: ubcore jetty attributes; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + */ +int ubcore_modify_jetty(struct ubcore_jetty *jetty, + struct ubcore_jetty_attr *attr, + struct ubcore_udata *udata); +/** + * query jetty from ubcore device. + * @param[in] jetty: the jetty created before; + * @param[out] cfg: jetty configurations; + * @param[out] attr: ubcore jetty attributes; + * @return: 0 on success, other value on error + */ +int ubcore_query_jetty(struct ubcore_jetty *jetty, struct ubcore_jetty_cfg *cfg, + struct ubcore_jetty_attr *attr); +/** + * destroy jetty from ubcore device. + * @param[in] jetty: the jetty created before; + * @return: 0 on success, other value on error + */ +int ubcore_delete_jetty(struct ubcore_jetty *jetty); +/** + * batch destroy jetty from ubcore device. + * @param[in] jetty_arr: the jetty array created before; + * @param[in] jetty_num: jetty array length; + * @param[out] bad_jetty_index: when error, return error jetty index in the array; + * @return: 0 on success, EINVAL on invalid parameter, other value on other batch + * delete errors. + * If delete error happens(except invalid parameter), stop at the first failed + * jetty and return, these jetty before the failed jetty will be deleted normally. + */ +int ubcore_delete_jetty_batch(struct ubcore_jetty **jetty_arr, int jetty_num, + int *bad_jetty_index); +/** + * return the wrs in JETTY that is not consumed to the application through cr. 
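A sketch of a kernel client creating a completion queue (JFC) and a jetty with the calls above. Both event handlers are documented as optional, so NULL is passed; the cfg structures are zeroed placeholders whose fields are defined in ubcore_types.h (the jetty cfg would typically reference the JFC just created).

static int foo_create_jetty_pair(struct ubcore_device *dev)
{
	struct ubcore_jfc_cfg jfc_cfg = {};	/* fields defined in ubcore_types.h */
	struct ubcore_jetty_cfg jetty_cfg = {};	/* would normally reference the jfc */
	struct ubcore_jfc *jfc;
	struct ubcore_jetty *jetty;

	jfc = ubcore_create_jfc(dev, &jfc_cfg, NULL, NULL, NULL);
	if (!jfc)
		return -ENOMEM;

	jetty = ubcore_create_jetty(dev, &jetty_cfg, NULL, NULL);
	if (!jetty) {
		ubcore_delete_jfc(jfc);
		return -ENOMEM;
	}

	/* ... post WRs, poll the jfc ... */

	ubcore_delete_jetty(jetty);
	ubcore_delete_jfc(jfc);
	return 0;
}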
+ * @param[in] jetty: the jetty created before; + * @param[in] cr_cnt: the maximum number of CRs expected to be returned; + * @param[out] cr: the addr of returned CRs; + * @return: the number of completion record returned, 0 means no completion record returned, + * -1 on error + */ +int ubcore_flush_jetty(struct ubcore_jetty *jetty, int cr_cnt, + struct ubcore_cr *cr); +/** + * import jfr to ubcore device. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: remote jfr attributes and import configurations + * @param[in] udata (optional): ucontext and user space driver data + * @return: target jfr pointer on success, NULL on error + */ +struct ubcore_tjetty *ubcore_import_jfr(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); + +/** + * import jfr to ubcore device by control plane. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: remote jfr attributes and import configurations; + * @param[in] active_tp_cfg: tp configuration to active; + * @param[in] udata (optional): ucontext and user space driver data + * @return: target jfr pointer on success, NULL on error + */ +struct ubcore_tjetty * +ubcore_import_jfr_ex(struct ubcore_device *dev, struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata); + +/** + * unimport jfr from ubcore device. + * @param[in] tjfr: the target jfr imported before; + * @return: 0 on success, other value on error + */ +int ubcore_unimport_jfr(struct ubcore_tjetty *tjfr); +/** + * import jetty to ubcore device. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: remote jetty attributes and import configurations + * @param[in] udata (optional): ucontext and user space driver data + * @return: target jetty pointer on success, NULL on error + */ +struct ubcore_tjetty *ubcore_import_jetty(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata); + +/** + * import jetty to ubcore device by control plane. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: remote jetty attributes and import configurations + * @param[in] active_tp_cfg: tp configuration to active + * @param[in] udata (optional): ucontext and user space driver data + * @return: target jetty pointer on success, NULL on error + */ +struct ubcore_tjetty * +ubcore_import_jetty_ex(struct ubcore_device *dev, struct ubcore_tjetty_cfg *cfg, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata); + +/** + * unimport jetty from ubcore device. + * @param[in] tjetty: the target jetty imported before; + * @return: 0 on success, other value on error + */ +int ubcore_unimport_jetty(struct ubcore_tjetty *tjetty); +/** + * Advise jfr: construct the transport channel for jfs and remote jfr. + * @param[in] jfs: jfs to use to construct the transport channel; + * @param[in] tjfr: target jfr to reach; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + */ +int ubcore_advise_jfr(struct ubcore_jfs *jfs, struct ubcore_tjetty *tjfr, + struct ubcore_udata *udata); +/** + * Unadvise jfr: Tear down the transport channel from jfs to remote jfr. + * @param[in] jfs: jfs to use to destruct the transport channel; + * @param[in] tjfr: target jfr advised before; + * @return: 0 on success, other value on error + */ +int ubcore_unadvise_jfr(struct ubcore_jfs *jfs, struct ubcore_tjetty *tjfr); +/** + * Advise jetty: construct the transport channel between local jetty and remote jetty. 
+ * @param[in] jetty: local jetty to construct the transport channel; + * @param[in] tjetty: target jetty to reach imported before; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + */ +int ubcore_advise_jetty(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata); +/** + * Unadvise jetty: deconstruct the transport channel between local jetty and remote jetty. + * @param[in] jetty: local jetty to destruct the transport channel; + * @param[in] tjetty: target jetty advised before; + * @return: 0 on success, other value on error + */ +int ubcore_unadvise_jetty(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty); +/** + * Bind jetty: Bind local jetty with remote jetty, and construct a transport channel between them. + * @param[in] jetty: local jetty to bind; + * @param[in] tjetty: target jetty imported before; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + * Note: A local jetty can be binded with only one remote jetty. + * Only supported by jetty with URMA_TM_RC. + */ +int ubcore_bind_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, + struct ubcore_udata *udata); + +/** + * Bind jetty: Bind local jetty with remote jetty, and construct a transport channel between them. + * @param[in] jetty: local jetty to bind; + * @param[in] tjetty: target jetty imported before; + * @param[in] active_tp_cfg: tp configuration to active; + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + * Note: A local jetty can be binded with only one remote jetty. + * Only supported by jetty with URMA_TM_RC. + */ +int ubcore_bind_jetty_ex(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, + struct ubcore_active_tp_cfg *active_tp_cfg, + struct ubcore_udata *udata); +/** + * Unbind jetty: Unbind local jetty with remote jetty, + * and tear down the transport channel between them. + * @param[in] jetty: local jetty to unbind; + * @return: 0 on success, other value on error + */ +int ubcore_unbind_jetty(struct ubcore_jetty *jetty); +/** + * create jetty group with ubcore device. + * @param[in] dev: the ubcore device handle; + * @param[in] cfg: jetty group configurations + * @param[in] jfae_handler (optional): jetty async_event handler + * @param[in] udata (optional): ucontext and user space driver data + * @return: jetty group pointer on success, NULL on error + */ +struct ubcore_jetty_group *ubcore_create_jetty_grp( + struct ubcore_device *dev, struct ubcore_jetty_grp_cfg *cfg, + ubcore_event_callback_t jfae_handler, struct ubcore_udata *udata); +/** + * destroy jetty group from ubcore device. + * @param[in] jetty_grp: the jetty group created before; + * @return: 0 on success, other value on error + */ +int ubcore_delete_jetty_grp(struct ubcore_jetty_group *jetty_grp); +/** + * import jetty to ubcore device. 
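A sketch of the RC connection flow described above: import the remote jetty, bind the local one to it, and undo both on teardown. How remote_cfg is obtained (remote EID, jetty id, token, ...) is up to the application's own exchange mechanism, and foo_connect_rc is hypothetical.

static int foo_connect_rc(struct ubcore_device *dev, struct ubcore_jetty *jetty,
			  struct ubcore_tjetty_cfg *remote_cfg)
{
	struct ubcore_tjetty *tjetty;
	int ret;

	tjetty = ubcore_import_jetty(dev, remote_cfg, NULL);
	if (!tjetty)
		return -ENXIO;

	/* a local jetty can be bound to only one remote jetty (URMA_TM_RC) */
	ret = ubcore_bind_jetty(jetty, tjetty, NULL);
	if (ret) {
		ubcore_unimport_jetty(tjetty);
		return ret;
	}

	/* teardown later: ubcore_unbind_jetty(jetty); ubcore_unimport_jetty(tjetty); */
	return 0;
}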
+ * @param[in] dev: the ubcore device handle; + * @param[in] cfg: remote jetty attributes and import configurations + * @param[in] timeout: max time to wait (milliseconds) + * @param[in] cb: callback function pointer with custom user argument + * @param[in] udata (optional): ucontext and user space driver data + * @return: target jetty pointer on success, NULL on error + */ +struct ubcore_tjetty *ubcore_import_jetty_async(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + int timeout, + struct ubcore_import_cb *cb, + struct ubcore_udata *udata); +/** + * unimport jetty from ubcore device. + * @param[in] tjetty: the target jetty imported before; + * @param[in] timeout: max time to wait (milliseconds) + * @param[in] cb: callback function pointer with custom user argument + * @return: 0 on success, other value on error + */ +int ubcore_unimport_jetty_async(struct ubcore_tjetty *tjetty, int timeout, + struct ubcore_unimport_cb *cb); +/** + * Bind jetty: Bind local jetty with remote jetty, and construct a transport channel between them. + * @param[in] jetty: local jetty to bind; + * @param[in] tjetty: target jetty imported before; + * @param[in] timeout: max time to wait (milliseconds) + * @param[in] cb: callback function pointer with custom user argument + * @param[in] udata (optional): ucontext and user space driver data + * @return: 0 on success, other value on error + * Note: A local jetty can be binded with only one remote jetty. + * Only supported by jetty with URMA_TM_RC. + */ +int ubcore_bind_jetty_async(struct ubcore_jetty *jetty, + struct ubcore_tjetty *tjetty, int timeout, + struct ubcore_bind_cb *cb, + struct ubcore_udata *udata); +/** + * Unbind jetty: Unbind local jetty with remote jetty, + * and tear down the transport channel between them. + * @param[in] jetty: local jetty to unbind; + * @param[in] timeout: max time to wait (milliseconds) + * @param[in] cb: callback function pointer with custom user argument + * @return: 0 on success, other value on error + */ +int ubcore_unbind_jetty_async(struct ubcore_jetty *jetty, int timeout, + struct ubcore_unbind_cb *cb); + +/** + * get tp list from ubep. + * @param[in] dev: ubcore device pointer created before; + * @param[in] cfg: configuration to be matched; + * @param[in && out] tp_cnt: tp_cnt is the length of tp_list buffer as in parameter; + * tp_cnt is the number of tp as out parameter; + * @param[out] tp_list: tp info list to get; + * @param[in && out] udata: ucontext and user space driver data; + * @return: 0 on success, other value on error + */ +int ubcore_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *cfg, + uint32_t *tp_cnt, struct ubcore_tp_info *tp_list, + struct ubcore_udata *udata); + +/** + * set tp attributions by control plane. 
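A sketch of the in/out contract of ubcore_get_tp_list above: tp_cnt carries the buffer capacity in and the number of entries filled out, and the caller owns tp_list. Buffer size and the foo_* name are illustrative.

static int foo_list_tps(struct ubcore_device *dev, struct ubcore_get_tp_cfg *cfg)
{
	struct ubcore_tp_info tp_list[8];	/* caller-owned buffer */
	uint32_t tp_cnt = 8;			/* in: capacity, out: entries filled */
	int ret;

	ret = ubcore_get_tp_list(dev, cfg, &tp_cnt, tp_list, NULL);
	if (ret == 0)
		pr_info("matched %u transport channels\n", tp_cnt);
	return ret;
}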
+ * @param[in] dev: ubcore device pointer created before; + * @param[in] tp_handle: tp_handle got by ubcore_get_tp_list; + * @param[in] tp_attr_cnt: number of tp attributions; + * @param[in] tp_attr_bitmap: tp attributions bitmap, current bitmap is as follow: + * 0-retry_times_init: 3 bit 1-at: 5 bit 2-SIP: 128 bit + * 3-DIP: 128 bit 4-SMA: 48 bit 5-DMA: 48 bit + * 6-vlan_id: 12 bit 7-vlan_en: 1 bit 8-dscp: 6 bit + * 9-at_times: 5 bit 10-sl: 4 bit 11-tti: 8 bit + * @param[in] tp_attr: tp attribution values to set; + * @param[in && out] udata: ucontext and user space driver data; + * @return: 0 on success, other value on error + */ +int ubcore_set_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, + const uint8_t tp_attr_cnt, const uint32_t tp_attr_bitmap, + const struct ubcore_tp_attr_value *tp_attr, + struct ubcore_udata *udata); + +/** + * get tp attributions by control plane. + * @param[in] dev: ubcore device pointer created before; + * @param[in] tp_handle: tp_handle got by ubcore_get_tp_list; + * @param[out] tp_attr_cnt: number of tp attributions; + * @param[out] tp_attr_bitmap: tp attributions bitmap, current bitmap is as follow: + * 0-retry_times_init: 3 bit 1-at: 5 bit 2-SIP: 128 bit + * 3-DIP: 128 bit 4-SMA: 48 bit 5-DMA: 48 bit + * 6-vlan_id: 12 bit 7-vlan_en: 1 bit 8-dscp: 6 bit + * 9-at_times: 5 bit 10-sl: 4 bit 11-ttl: 8 bit + * @param[out] buf: tp attributions buffer to set; + * @param[in] buf_size: tp attributions buffer size; + * @param[in && out] udata: ucontext and user space driver data; + * @return: 0 on success, other value on error + */ +int ubcore_get_tp_attr(struct ubcore_device *dev, const uint64_t tp_handle, + uint8_t *tp_attr_cnt, uint32_t *tp_attr_bitmap, + struct ubcore_tp_attr_value *tp_attr, + struct ubcore_udata *udata); + +/** + * exchange tp info from ubep. + * @param[in] dev: ubcore device pointer created before; + * @param[in] cfg: configuration to be matched; + * @param[in] tp_handle: local tp handle; + * @param[in] tx_psn: local packet sequence number; + * @param[out] peer_tp_handle: tp_handle got by ubcore_exchange_tp_info; + * @param[out] rx_psn: remote packet sequence number; + * @param[in] udata: [Optional] udata should be NULL when called + * by kernel application and be valid when called + * by user space application + * @return: 0 on success, other value on error + */ +int ubcore_exchange_tp_info(struct ubcore_device *dev, + struct ubcore_get_tp_cfg *cfg, uint64_t tp_handle, + uint32_t tx_psn, uint64_t *peer_tp_handle, + uint32_t *rx_psn, struct ubcore_udata *udata); + +/** + * operation of user ioctl cmd. 
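A sketch of ubcore_exchange_tp_info as documented above: trade the locally chosen tp_handle/tx_psn for the peer's values. Where tp_handle and tx_psn come from (for example ubcore_get_tp_list and a locally generated PSN) is the caller's business; the NULL udata is the documented kernel-caller case.

static int foo_exchange_tp(struct ubcore_device *dev,
			   struct ubcore_get_tp_cfg *cfg,
			   uint64_t tp_handle, uint32_t tx_psn)
{
	uint64_t peer_tp_handle;
	uint32_t rx_psn;
	int ret;

	ret = ubcore_exchange_tp_info(dev, cfg, tp_handle, tx_psn,
				      &peer_tp_handle, &rx_psn, NULL);
	if (ret == 0) {
		/* peer_tp_handle / rx_psn would feed the activation step,
		 * e.g. an ubcore_active_tp_cfg for the active_tp / *_ex paths
		 */
	}
	return ret;
}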
+ * @param[in] dev: the ubcore device handle; + * @param[in] k_user_ctl: kdrv user control command pointer; + * @return: 0 on success, other value on error + */ +int ubcore_user_control(struct ubcore_device *dev, + struct ubcore_user_ctl *k_user_ctl); +/** + * Client register an async_event handler to ubcore + * @param[in] dev: the ubcore device handle; + * @param[in] handler: async_event handler to be registered + * Note: the handler will be called when driver reports an async_event with + * ubcore_dispatch_async_event + */ +void ubcore_register_event_handler(struct ubcore_device *dev, + struct ubcore_event_handler *handler); +/** + * Client unregister async_event handler from ubcore + * @param[in] dev: the ubcore device handle; + * @param[in] handler: async_event handler to be unregistered + */ +void ubcore_unregister_event_handler(struct ubcore_device *dev, + struct ubcore_event_handler *handler); + +/* data path API */ +/** + * post jfs wr. + * @param[in] jfs: the jfs created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ +int ubcore_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr); +/** + * post jfr wr. + * @param[in] jfr: the jfr created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ +int ubcore_post_jfr_wr(struct ubcore_jfr *jfr, struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr); +/** + * post jetty send wr. + * @param[in] jetty: the jetty created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ +int ubcore_post_jetty_send_wr(struct ubcore_jetty *jetty, + struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr); +/** + * post jetty receive wr. + * @param[in] jetty: the jetty created before; + * @param[in] wr: the wr to be posted; + * @param[out] bad_wr: the first failed wr; + * @return: 0 on success, other value on error + */ +int ubcore_post_jetty_recv_wr(struct ubcore_jetty *jetty, + struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr); +/** + * poll jfc. 
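A data-path sketch using the post/poll calls above: post one send WR on a jetty and reap completions from its JFC with ubcore_poll_jfc (declared in this same header). The zeroed WR, the buffer size, the busy-poll loop, and foo_send_and_poll are illustrative; production code would fill the WR fields from ubcore_types.h and rearm the JFC to wait for completion events instead of spinning.

static int foo_send_and_poll(struct ubcore_jetty *jetty, struct ubcore_jfc *jfc)
{
	struct ubcore_jfs_wr wr = {};	/* opcode, SGEs, remote address: see ubcore_types.h */
	struct ubcore_jfs_wr *bad_wr = NULL;
	struct ubcore_cr cr[4];
	int n;

	if (ubcore_post_jetty_send_wr(jetty, &wr, &bad_wr))
		return -EIO;	/* bad_wr points at the first WR that failed */

	do {	/* simplified busy poll */
		n = ubcore_poll_jfc(jfc, 4, cr);	/* >0: CRs, 0: empty, -1: error */
	} while (n == 0);

	return n < 0 ? -EIO : 0;
}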
+ * @param[in] jfc: the jfc created before; + * @param[in] cr_cnt: the maximum number of CRs expected to be polled; + * @param[out] cr: the addr of returned CRs; + * @return: the number of completion record returned, 0 means no completion record returned, + * -1 on error + */ +int ubcore_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr); + +/** + * Find ubcore device by eid and transport type + * @param[in] eid: the eid of device; + * @param[in] type: transport type; + * @return: ubcore device pointer on success, NULL on error + */ +struct ubcore_device *ubcore_get_device_by_eid(union ubcore_eid *eid, + enum ubcore_transport_type type); + +// for system not support cgroup +#ifndef CONFIG_CGROUP_RDMA +static inline void ubcore_cgroup_reg_dev(struct ubcore_device *dev) +{ +} + +static inline void ubcore_cgroup_unreg_dev(struct ubcore_device *dev) +{ +} + +static inline int ubcore_cgroup_try_charge(struct ubcore_cg_object *cg_obj, + struct ubcore_device *dev, + enum ubcore_resource_type type) +{ + return 0; +} + +static inline void ubcore_cgroup_uncharge(struct ubcore_cg_object *cg_obj, + struct ubcore_device *dev, + enum ubcore_resource_type type) +{ +} +#else +/** + * Client register cgroup dev + * @param[in] dev: the ubcore device handle; + */ +void ubcore_cgroup_reg_dev(struct ubcore_device *dev); + +/** + * Client unregister cgroup dev + * @param[in] dev: the ubcore device handle; + */ +void ubcore_cgroup_unreg_dev(struct ubcore_device *dev); + +/** + * Client try to charge cgroup count + * @param[in] cg_obj: the cgroup obj + * @param[in] dev: the ubcore device handle; + * @param[in] type: the cgroup resource type + * @return: 0 on success, other value on error + */ +int ubcore_cgroup_try_charge(struct ubcore_cg_object *cg_obj, + struct ubcore_device *dev, + enum ubcore_resource_type type); + +/** + * Client uncharge cgroup count + * @param[in] cg_obj: the cgroup obj + * @param[in] dev: the ubcore device handle; + * @param[in] type: the cgroup resource type + */ +void ubcore_cgroup_uncharge(struct ubcore_cg_object *cg_obj, + struct ubcore_device *dev, + enum ubcore_resource_type type); +#endif // CONFIG_CGROUP_RDMA + +/** + * Get primary or port eid from topo info + * @param[in] tp_type: tp type, 0-RTP, 1-CTP, 2-UTP, + * refer to enum ubcore_tp_type; + * @param[in] src_v_eid: source virtual eid, refer + * to source bonding eid; + * @param[in] dst_v_eid: dest virtual eid, refer to + * dest bonding eid; + * @param[out] src_p_eid: source physical eid, refer + * to source primary or port eid; + * @param[out] dst_p_eid: dest physical eid, refer + * to dest primary or port eid; + */ +int ubcore_get_topo_eid(uint32_t tp_type, union ubcore_eid *src_v_eid, + union ubcore_eid *dst_v_eid, union ubcore_eid *src_p_eid, + union ubcore_eid *dst_p_eid); + +#endif -- Gitee