From c6a67c60f7f3aa9ec348eede8893dc08a9b0a942 Mon Sep 17 00:00:00 2001 From: Wen Chen Date: Wed, 12 Nov 2025 13:11:49 +0800 Subject: [PATCH 1/5] ubcore: add genl, netlink and vtp support to ubcore module. urma inclusion category: feature bugzilla: https://gitee.com/openeuler/release-management/issues/ID3WJX ---------------------------------------------- This patch introduces generic netlink (genl), netlink messaging, and Virtual Transport Point (VTP) support to the ubcore module, enhancing its control plane capabilities for URMA device management and communication. Key additions include: 1. Generic Netlink Interface: Implements a genl family with multiple command operations for device statistics query, resource management, EID configuration, namespace mode settings, and topology information retrieval. This provides a standardized userspace-kernel communication channel. 2. Netlink Messaging Framework: Adds comprehensive netlink message handling with session management, request/response protocols, and asynchronous operation support. Enables reliable message exchange between kernel and userspace components. 3. Virtual Transport Point Implementation: Introduces VTP management for establishing and maintaining virtual transport connections. Supports connection lifecycle management, asynchronous event handling, and resource sharing across multiple logical connections. 4. Device Management Enhancements: Extends device capabilities with MUE (Management User Element) device support, including device discovery, capability query, and dynamic EID management for flexible endpoint addressing. The patch maintains backward compatibility while providing the foundation for advanced URMA features like dynamic connection management and distributed resource coordination. Signed-off-by: Wen Chen Signed-off-by: Yongqiang Guo --- drivers/ub/urma/ubcore/ubcore_device.c | 1 + include/ub/urma/ubcore_uapi.h | 34 -------------------------- 2 files changed, 1 insertion(+), 34 deletions(-) diff --git a/drivers/ub/urma/ubcore/ubcore_device.c b/drivers/ub/urma/ubcore/ubcore_device.c index f0508d9762d1..667cbe272052 100644 --- a/drivers/ub/urma/ubcore/ubcore_device.c +++ b/drivers/ub/urma/ubcore/ubcore_device.c @@ -25,6 +25,7 @@ #include "ubcore_workqueue.h" #include "ubcore_main.h" #include "ubcore_cdev_file.h" +#include "ubcore_uvs_cmd.h" #define UBCORE_MAX_MUE_NUM 16 #define UBCORE_DEVICE_NAME "ubcore" diff --git a/include/ub/urma/ubcore_uapi.h b/include/ub/urma/ubcore_uapi.h index 423454c4d522..f744eaa98b74 100644 --- a/include/ub/urma/ubcore_uapi.h +++ b/include/ub/urma/ubcore_uapi.h @@ -445,40 +445,6 @@ ubcore_import_jetty_ex(struct ubcore_device *dev, struct ubcore_tjetty_cfg *cfg, * @return: 0 on success, other value on error */ int ubcore_unimport_jetty(struct ubcore_tjetty *tjetty); -/** - * Advise jfr: construct the transport channel for jfs and remote jfr. - * @param[in] jfs: jfs to use to construct the transport channel; - * @param[in] tjfr: target jfr to reach; - * @param[in] udata (optional): ucontext and user space driver data - * @return: 0 on success, other value on error - */ -int ubcore_advise_jfr(struct ubcore_jfs *jfs, struct ubcore_tjetty *tjfr, - struct ubcore_udata *udata); -/** - * Unadvise jfr: Tear down the transport channel from jfs to remote jfr. 
- * @param[in] jfs: jfs to use to destruct the transport channel; - * @param[in] tjfr: target jfr advised before; - * @return: 0 on success, other value on error - */ -int ubcore_unadvise_jfr(struct ubcore_jfs *jfs, struct ubcore_tjetty *tjfr); -/** - * Advise jetty: construct the transport channel between local jetty and remote jetty. - * @param[in] jetty: local jetty to construct the transport channel; - * @param[in] tjetty: target jetty to reach imported before; - * @param[in] udata (optional): ucontext and user space driver data - * @return: 0 on success, other value on error - */ -int ubcore_advise_jetty(struct ubcore_jetty *jetty, - struct ubcore_tjetty *tjetty, - struct ubcore_udata *udata); -/** - * Unadvise jetty: deconstruct the transport channel between local jetty and remote jetty. - * @param[in] jetty: local jetty to destruct the transport channel; - * @param[in] tjetty: target jetty advised before; - * @return: 0 on success, other value on error - */ -int ubcore_unadvise_jetty(struct ubcore_jetty *jetty, - struct ubcore_tjetty *tjetty); /** * Bind jetty: Bind local jetty with remote jetty, and construct a transport channel between them. * @param[in] jetty: local jetty to bind; -- Gitee From 4900954c6f84e21b6a8e94d492c7109efce10b5a Mon Sep 17 00:00:00 2001 From: Wen Chen Date: Wed, 12 Nov 2025 15:27:12 +0800 Subject: [PATCH 2/5] ubcore: add segment management support to ubcore module. urma inclusion category: feature bugzilla: https://gitee.com/openeuler/release-management/issues/ID3WJX ---------------------------------------------- This patch introduces segment management capabilities to the ubcore module, providing memory region registration and token-based access control for URMA data operations. The implementation enables efficient memory management and secure data access across distributed systems. Key features include: 1. Segment Registration: Allows users to register memory segments with the URMA device, making them available for remote direct memory access (RDMA) operations. Supports both physical and virtual address mappings with proper access control validation. 2. Token-based Access Control: Implements token identifier management for secure segment access. Tokens provide authorization mechanisms that control which remote entities can access registered memory regions, enhancing security in multi-tenant environments. 3. Segment Import/Export: Enables sharing of memory segments between different URMA devices and processes. Supports both local registration and remote import operations with proper lifecycle management and reference counting. The segment management subsystem forms the foundation for safe and efficient memory operations in URMA, enabling high-performance data transfer while maintaining security boundaries and resource isolation. 
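As an illustration of the intended call pattern, a minimal in-kernel registration sketch follows. It is not part of the patch: the field and constant spellings are taken from this series, while the header path and the demo_* names are assumptions and error handling is trimmed.

  #include <linux/err.h>
  #include <linux/slab.h>
  #include <ub/urma/ubcore_uapi.h>  /* assumed location of the prototypes */

  static struct ubcore_target_seg *demo_register_seg(struct ubcore_device *dev)
  {
          struct ubcore_seg_cfg cfg = { 0 };
          struct ubcore_target_seg *tseg;
          void *buf;

          /* Back the segment with 4 KiB of kernel memory. */
          buf = kzalloc(4096, GFP_KERNEL);
          if (buf == NULL)
                  return ERR_PTR(-ENOMEM);

          cfg.va = (uint64_t)buf;
          cfg.len = 4096;
          cfg.eid_index = 0;                              /* first local EID */
          cfg.flag.bs.token_policy = UBCORE_TOKEN_NONE;   /* no token check */
          cfg.flag.bs.access = UBCORE_ACCESS_LOCAL_ONLY;
          /*
           * flag.bs.token_id_valid is left 0: for UB transport with
           * udata == NULL, ubcore_register_seg() allocates a token id
           * itself and ubcore_unregister_seg() frees it again.
           */

          tseg = ubcore_register_seg(dev, &cfg, NULL);
          if (IS_ERR_OR_NULL(tseg))
                  kfree(buf);
          return tseg;
  }

Teardown is ubcore_unregister_seg(tseg) followed by freeing the backing buffer, the same pattern the ub_mad code later in this series uses for its send and receive segments.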
Signed-off-by: Wen Chen Signed-off-by: Yongqiang Guo --- drivers/ub/urma/ubcore/Makefile | 4 +- drivers/ub/urma/ubcore/ubcore_segment.c | 252 ++++++++++++++++++++++++ 2 files changed, 255 insertions(+), 1 deletion(-) create mode 100644 drivers/ub/urma/ubcore/ubcore_segment.c diff --git a/drivers/ub/urma/ubcore/Makefile b/drivers/ub/urma/ubcore/Makefile index cfdbd7ce982d..5b941f5aaa7c 100644 --- a/drivers/ub/urma/ubcore/Makefile +++ b/drivers/ub/urma/ubcore/Makefile @@ -27,6 +27,8 @@ ubcore_genl.o \ ubcore_genl_admin.o \ ubcore_vtp.o \ ubcore_msg.o \ -ubcore_netlink.o +ubcore_netlink.o \ +ubcore_dp.o \ +ubcore_segment.o obj-$(CONFIG_UB_URMA) += ubcore.o diff --git a/drivers/ub/urma/ubcore/ubcore_segment.c b/drivers/ub/urma/ubcore/ubcore_segment.c new file mode 100644 index 000000000000..b643fecc7ff3 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_segment.c @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * Description: ubcore segment + * Author: Qian Guoxin, Ouyang Changchun + * Create: 2022-07-28 + * Note: + * History: 2022-07-28: Yan Fangfang move segment implementation here + */ + +#include "ubcore_connect_bonding.h" +#include "ubcore_log.h" +#include +#include "ubcore_priv.h" +#include "ubcore_hash_table.h" +#include "ubcore_tp.h" +#include "ubcore_tp_table.h" + +struct ubcore_token_id *ubcore_alloc_token_id(struct ubcore_device *dev, + union ubcore_token_id_flag flag, + struct ubcore_udata *udata) +{ + struct ubcore_token_id *token_id; + + if (flag.bs.pa == 1 && udata != NULL) { + ubcore_log_err("invalid parameter of pa.\n"); + return ERR_PTR(-EINVAL); + } + + if (dev == NULL || dev->ops == NULL || + dev->ops->alloc_token_id == NULL || + dev->ops->free_token_id == NULL) { + ubcore_log_err("invalid parameter.\n"); + return ERR_PTR(-EINVAL); + } + + token_id = dev->ops->alloc_token_id(dev, flag, udata); + if (IS_ERR_OR_NULL(token_id)) { + ubcore_log_err("failed to alloc token_id id.\n"); + return UBCORE_CHECK_RETURN_ERR_PTR(token_id, ENOEXEC); + } + token_id->flag = flag; + token_id->ub_dev = dev; + token_id->uctx = ubcore_get_uctx(udata); + atomic_set(&token_id->use_cnt, 0); + return token_id; +} +EXPORT_SYMBOL(ubcore_alloc_token_id); + +int ubcore_free_token_id(struct ubcore_token_id *token_id) +{ + struct ubcore_device *dev; + + if (token_id == NULL || token_id->ub_dev == NULL || + token_id->ub_dev->ops == NULL || + token_id->ub_dev->ops->free_token_id == NULL) { + ubcore_log_err("invalid parameter.\n"); + return -EINVAL; + } + dev = token_id->ub_dev; + + if (atomic_read(&token_id->use_cnt)) { + ubcore_log_err("The token_id is still being used"); + return -EBUSY; + } + return dev->ops->free_token_id(token_id); +} +EXPORT_SYMBOL(ubcore_free_token_id); + +static int ubcore_check_register_seg_para(struct ubcore_device *dev, + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + if (dev == NULL || cfg == NULL || dev->ops == NULL || + dev->ops->register_seg == NULL || + dev->ops->unregister_seg == NULL || + IS_ERR_OR_NULL(dev->eid_table.eid_entries)) { + ubcore_log_err("invalid parameter.\n"); + return -1; + } + + if (ubcore_is_bonding_dev(dev)) + return 0; + + if (cfg->flag.bs.pa == 1 && udata != NULL) { + ubcore_log_err("invalid parameter of pa.\n"); + return -1; + } + + if (dev->transport_type == UBCORE_TRANSPORT_UB && + ((cfg->flag.bs.token_id_valid == UBCORE_TOKEN_ID_VALID && + cfg->token_id == NULL) || + (cfg->flag.bs.token_id_valid == UBCORE_TOKEN_ID_INVALID && + 
cfg->token_id != NULL))) { + ubcore_log_err("invalid parameter of token_id.\n"); + return -1; + } + + if (dev->transport_type == UBCORE_TRANSPORT_UB && + cfg->flag.bs.token_id_valid == UBCORE_TOKEN_ID_VALID && + cfg->token_id->flag.bs.pa != cfg->flag.bs.pa) { + ubcore_log_err("invalid parameter of token_id pa.\n"); + return -1; + } + + if (cfg->eid_index >= dev->eid_table.eid_cnt) { + ubcore_log_warn("eid_index:%u >= eid_table cnt:%u.\n", + cfg->eid_index, dev->eid_table.eid_cnt); + return -1; + } + return 0; +} + +struct ubcore_target_seg *ubcore_register_seg(struct ubcore_device *dev, + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + union ubcore_token_id_flag flag = { 0 }; + bool alloc_token_id = false; + struct ubcore_seg_cfg tmp_cfg; + struct ubcore_target_seg *tseg; + + if (ubcore_check_register_seg_para(dev, cfg, udata) != 0) + return ERR_PTR(-EINVAL); + + if (udata == NULL && + cfg->flag.bs.token_id_valid == UBCORE_TOKEN_ID_INVALID && + dev->transport_type == UBCORE_TRANSPORT_UB) + alloc_token_id = true; + + tmp_cfg = *cfg; + if (alloc_token_id == true) { + flag.bs.pa = cfg->flag.bs.pa; + tmp_cfg.token_id = ubcore_alloc_token_id(dev, flag, NULL); + if (IS_ERR_OR_NULL(tmp_cfg.token_id)) { + ubcore_log_err("alloc token id failed.\n"); + return (void *)tmp_cfg.token_id; + } + } + + tseg = dev->ops->register_seg(dev, &tmp_cfg, udata); + if (IS_ERR_OR_NULL(tseg)) { + ubcore_log_err_rl("UBEP failed to register segment.\n"); + if (alloc_token_id == true) + (void)ubcore_free_token_id(tmp_cfg.token_id); + return UBCORE_CHECK_RETURN_ERR_PTR(tseg, ENOEXEC); + } + + tseg->ub_dev = dev; + tseg->uctx = ubcore_get_uctx(udata); + tseg->seg.len = tmp_cfg.len; + tseg->seg.ubva.va = tmp_cfg.va; + tseg->token_id = tmp_cfg.token_id; + + (void)memcpy(tseg->seg.ubva.eid.raw, + dev->eid_table.eid_entries[cfg->eid_index].eid.raw, + UBCORE_EID_SIZE); + (void)memcpy(&tseg->seg.attr, &cfg->flag, + sizeof(union ubcore_reg_seg_flag)); + tseg->seg.attr.bs.user_token_id = tmp_cfg.flag.bs.token_id_valid; + atomic_set(&tseg->use_cnt, 0); + if (tseg->token_id != NULL) + atomic_inc(&tseg->token_id->use_cnt); + + return tseg; +} +EXPORT_SYMBOL(ubcore_register_seg); + +int ubcore_unregister_seg(struct ubcore_target_seg *tseg) +{ + struct ubcore_token_id *token_id = NULL; + bool free_token_id = false; + struct ubcore_device *dev; + int ret; + + if (tseg == NULL || tseg->ub_dev == NULL || tseg->ub_dev->ops == NULL || + tseg->ub_dev->ops->unregister_seg == NULL) { + ubcore_log_err("invalid parameter.\n"); + return -EINVAL; + } + + dev = tseg->ub_dev; + + if (tseg->token_id != NULL) + atomic_dec(&tseg->token_id->use_cnt); + + if (tseg->seg.attr.bs.user_token_id == UBCORE_TOKEN_ID_INVALID && + dev->transport_type == UBCORE_TRANSPORT_UB && + tseg->token_id != NULL && tseg->uctx == NULL) { + free_token_id = true; + token_id = tseg->token_id; + } + + ret = dev->ops->unregister_seg(tseg); + + if (free_token_id == true && token_id != NULL) + (void)ubcore_free_token_id(token_id); + + return ret; +} +EXPORT_SYMBOL(ubcore_unregister_seg); + +struct ubcore_target_seg *ubcore_import_seg(struct ubcore_device *dev, + struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_target_seg *tseg; + + if (dev == NULL || cfg == NULL || dev->ops == NULL || + dev->ops->import_seg == NULL || dev->ops->unimport_seg == NULL) { + ubcore_log_err("invalid parameter.\n"); + return ERR_PTR(-EINVAL); + } + + if (ubcore_is_bonding_dev(dev)) { + if (ubcore_connect_exchange_udata_when_import_seg(&cfg->seg, + udata) 
!= 0) { + ubcore_log_err( + "failed to exchange udata when import seg\n"); + return ERR_PTR(-ENOEXEC); + } + } + + tseg = dev->ops->import_seg(dev, cfg, udata); + if (IS_ERR_OR_NULL(tseg)) { + ubcore_log_err("UBEP failed to import segment with va\n"); + return UBCORE_CHECK_RETURN_ERR_PTR(tseg, ENOEXEC); + } + tseg->ub_dev = dev; + tseg->uctx = ubcore_get_uctx(udata); + tseg->seg = cfg->seg; + atomic_set(&tseg->use_cnt, 0); + + return tseg; +} +EXPORT_SYMBOL(ubcore_import_seg); + +int ubcore_unimport_seg(struct ubcore_target_seg *tseg) +{ + struct ubcore_device *dev; + + if (tseg == NULL || tseg->ub_dev == NULL || tseg->ub_dev->ops == NULL || + tseg->ub_dev->ops->unimport_seg == NULL) { + ubcore_log_err("invalid parameter.\n"); + return -EINVAL; + } + dev = tseg->ub_dev; + + return dev->ops->unimport_seg(tseg); +} +EXPORT_SYMBOL(ubcore_unimport_seg); -- Gitee From afd65f4f9c8077d583dbc60e0d4fea7cafb7deb9 Mon Sep 17 00:00:00 2001 From: Wen Chen Date: Wed, 12 Nov 2025 16:26:41 +0800 Subject: [PATCH 3/5] ubcore: implement ubcore connection management and messaging feature. urma inclusion category: feature bugzilla: https://gitee.com/openeuler/release-management/issues/ID3WJX ---------------------------------------------- This patch introduces the ubcm module to handle connection management an message passing functionalities within the ubcore framework. It replaces the previously used ubcore_priv.c and ubcore_netdev.h files with a more structured and maintainable design. The ubcm module provides the following key functionalities: 1. Connection Management: Implements reliable communication channels using well-known jetties for control message exchange between endpoints, including connection setup, authentication, and teardown. 2. Generic Netlink Interface: Establishes a communication bridge between kernel-space ubcore components and user-space services for configuration and control operations. 3. Message Sequencing and Retransmission: Provides reliable message delivery through sequence numbering and automatic retransmission mechanisms with configurable timeout and retry limits. The implementation includes comprehensive data path handling with support for both connection data and acknowledgment messages, while maintaining backward compatibility with existing ubcore interfaces. 
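Condensed from ub_cm.c in this patch, the agent registration flow looks like the sketch below. The demo_* names are invented for illustration; the types, handlers and calls are taken from the ub_mad code added here.

  #include <linux/err.h>
  #include <linux/errno.h>
  #include "ub_mad.h"

  /* Send completion on the well-known jetty: only the CR status matters. */
  static int demo_send_handler(struct ubmad_agent *agent,
                               struct ubmad_send_cr *send_cr)
  {
          return send_cr->cr->status == UBCORE_CR_SUCCESS ? 0 : -EIO;
  }

  /*
   * Receive path: msg_type distinguishes connection, ubcore CM and
   * authentication payloads; the real ubcm_recv_handler() forwards the
   * first and last to user space over generic netlink.
   */
  static int demo_recv_handler(struct ubmad_agent *agent,
                               struct ubmad_recv_cr *recv_cr)
  {
          switch (recv_cr->msg_type) {
          case UBMAD_CONN_DATA:
          case UBMAD_AUTHN_DATA:
                  return 0;       /* hand the payload to the control plane */
          default:
                  return -EINVAL;
          }
  }

  static int demo_add_device(struct ubcore_device *device)
  {
          struct ubmad_agent *agent;

          agent = ubmad_register_agent(device, demo_send_handler,
                                       demo_recv_handler, NULL);
          if (IS_ERR_OR_NULL(agent))
                  return IS_ERR(agent) ? PTR_ERR(agent) : -ENOMEM;
          /* ... later, on device removal: ubmad_unregister_agent(agent); */
          return 0;
  }

Message sequencing and retransmission stay inside ub_mad.c / ubmad_datapath.c and are invisible to the agent; in addition, ubcm_recv_handler() routes UBMAD_UBC_CONN_DATA messages straight to ubcore_cm_recv().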
Signed-off-by: Wen Chen Signed-off-by: Yongqiang Guo --- drivers/ub/urma/ubcore/Makefile | 9 +- drivers/ub/urma/ubcore/ubcm/ub_cm.c | 491 +++++++ drivers/ub/urma/ubcore/ubcm/ub_cm.h | 48 + drivers/ub/urma/ubcore/ubcm/ub_mad.c | 1268 ++++++++++++++++++ drivers/ub/urma/ubcore/ubcm/ub_mad.h | 84 ++ drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h | 261 ++++ drivers/ub/urma/ubcore/ubcm/ubcm_genl.c | 891 ++++++++++++ drivers/ub/urma/ubcore/ubcm/ubcm_genl.h | 130 ++ drivers/ub/urma/ubcore/ubcm/ubcm_log.c | 15 + drivers/ub/urma/ubcore/ubcm/ubcm_log.h | 94 ++ drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c | 1201 +++++++++++++++++ drivers/ub/urma/ubcore/ubcore_main.c | 10 + drivers/ub/urma/ubcore/ubcore_priv.c | 61 - drivers/ub/urma/ubcore/ubcore_segment.c | 2 +- 14 files changed, 4501 insertions(+), 64 deletions(-) create mode 100644 drivers/ub/urma/ubcore/ubcm/ub_cm.c create mode 100644 drivers/ub/urma/ubcore/ubcm/ub_cm.h create mode 100644 drivers/ub/urma/ubcore/ubcm/ub_mad.c create mode 100644 drivers/ub/urma/ubcore/ubcm/ub_mad.h create mode 100644 drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h create mode 100644 drivers/ub/urma/ubcore/ubcm/ubcm_genl.c create mode 100644 drivers/ub/urma/ubcore/ubcm/ubcm_genl.h create mode 100644 drivers/ub/urma/ubcore/ubcm/ubcm_log.c create mode 100644 drivers/ub/urma/ubcore/ubcm/ubcm_log.h create mode 100644 drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c delete mode 100644 drivers/ub/urma/ubcore/ubcore_priv.c diff --git a/drivers/ub/urma/ubcore/Makefile b/drivers/ub/urma/ubcore/Makefile index 5b941f5aaa7c..940e617a753f 100644 --- a/drivers/ub/urma/ubcore/Makefile +++ b/drivers/ub/urma/ubcore/Makefile @@ -9,7 +9,7 @@ net/ubcore_sock.o \ ubcore_workqueue.o \ ubcore_main.o \ ubcore_hash_table.o \ -ubcore_priv.o \ +ubcore_tp.o \ ubcore_cgroup.o \ net/ubcore_net.o \ ubcore_cdev_file.o \ @@ -29,6 +29,11 @@ ubcore_vtp.o \ ubcore_msg.o \ ubcore_netlink.o \ ubcore_dp.o \ -ubcore_segment.o +ubcore_segment.o \ +ubcm/ubcm_log.o \ +ubcm/ub_mad.o \ +ubcm/ubcm_genl.o \ +ubcm/ub_cm.o \ +ubcm/ubmad_datapath.o obj-$(CONFIG_UB_URMA) += ubcore.o diff --git a/drivers/ub/urma/ubcore/ubcm/ub_cm.c b/drivers/ub/urma/ubcore/ubcm/ub_cm.c new file mode 100644 index 000000000000..5499adc7a03f --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ub_cm.c @@ -0,0 +1,491 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * Description: ub_cm implementation + * Author: Chen Yutao + * Create: 2025-01-10 + * Note: + * History: 2025-01-10: create file + */ + +#include +#include +#include +#include +#include + + + + +#include "ubcm_log.h" +#include "ubcm_genl.h" +#include "ub_mad.h" +#include "ub_cm.h" + +#define UBCM_LOG_FILE_PERMISSION (0644) + +#define UBCM_MODULE_NAME "ubcm" +#define UBCM_DEVNO_MODE (0666) +#define UBCM_DEVICE_NAME "ubcm" + +module_param(g_ubcm_log_level, uint, UBCM_LOG_FILE_PERMISSION); +MODULE_PARM_DESC(g_ubcm_log_level, " 3: ERR, 4: WARNING, 6: INFO, 7: DEBUG"); + +struct ubcm_device { + struct kref kref; + struct list_head list_node; + struct ubcore_device *device; + struct ubmad_agent *agent; + spinlock_t agent_lock; +}; + +static struct ubcm_context g_ubcm_ctx = { 0 }; +struct ubcm_context *get_ubcm_ctx(void) +{ + return &g_ubcm_ctx; +} + +static int ubcm_open(struct inode *i_node, struct file *filp) +{ + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + return 0; +} + +static int ubcm_close(struct inode *i_node, struct file *filp) +{ + module_put(THIS_MODULE); + return 0; +} + +static const struct file_operations g_ubcm_ops = { + .owner = THIS_MODULE, + .open = ubcm_open, + .release = ubcm_close, + .unlocked_ioctl = NULL, /* ubcm does not support ioctl currently */ + .compat_ioctl = NULL, +}; + +static int ubcm_add_device(struct ubcore_device *device); +static void ubcm_remove_device(struct ubcore_device *device, void *client_ctx); + +static struct ubcore_client g_ubcm_client = { .list_node = LIST_HEAD_INIT( + g_ubcm_client.list_node), + .client_name = UBCM_MODULE_NAME, + .add = ubcm_add_device, + .remove = ubcm_remove_device }; + +static char *ubcm_devnode(const struct device *dev, umode_t *mode) + +{ + if (mode) + *mode = UBCM_DEVNO_MODE; + + return kasprintf(GFP_KERNEL, "%s", dev_name(dev)); +} + +static struct class g_ubcm_class = { + .name = UBCM_MODULE_NAME, + .devnode = ubcm_devnode, +}; + +static int ubcm_get_ubc_dev(struct ubcore_device *device) +{ + if (IS_ERR_OR_NULL(device)) { + ubcm_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + atomic_inc(&device->use_cnt); + return 0; +} + +static void ubcm_put_ubc_dev(struct ubcore_device *device) +{ + if (IS_ERR_OR_NULL(device)) { + ubcm_log_err("Invalid parameter.\n"); + return; + } + + if (atomic_dec_and_test(&device->use_cnt)) + complete(&device->comp); +} + +static void ubcm_get_device(struct ubcm_device *cm_dev) +{ + kref_get(&cm_dev->kref); +} + +static void ubcm_kref_release(struct kref *kref) +{ + struct ubcm_device *cm_dev = + container_of(kref, struct ubcm_device, kref); + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + struct ubmad_agent *agent; + + /* Delayed work should be flushed before resource destroy */ + flush_workqueue(cm_ctx->wq); + if (!IS_ERR_OR_NULL(cm_dev->agent)) { + agent = cm_dev->agent; + spin_lock(&cm_dev->agent_lock); + cm_dev->agent = NULL; + spin_unlock(&cm_dev->agent_lock); + (void)ubmad_unregister_agent(agent); + } + + if (!IS_ERR_OR_NULL(cm_dev->device)) { + ubcm_put_ubc_dev(cm_dev->device); + cm_dev->device = NULL; + } + + kfree(cm_dev); +} + +static void ubcm_put_device(struct ubcm_device *cm_dev) +{ + uint32_t refcnt; + + refcnt = kref_read(&cm_dev->kref); + ubcm_log_info("ubcm kref put, old refcnt: %u, new refcnt: %u.\n", + refcnt, refcnt > 0 ? 
refcnt - 1 : 0); + + kref_put(&cm_dev->kref, ubcm_kref_release); +} + +static int ubcm_send_handler(struct ubmad_agent *agent, + struct ubmad_send_cr *send_cr) +{ + /* Note: agent & send_buf cannot be NULL, no need to check */ + if (IS_ERR_OR_NULL(send_cr->cr)) { + ubcm_log_err("Invalid parameter.\n"); + return -EINVAL; + } + if (send_cr->cr->status != UBCORE_CR_SUCCESS) { + ubcm_log_err("Cr status error: %d.\n", + (int)send_cr->cr->status); + return -EINVAL; + } + + return 0; +} + +static int ubcm_recv_handler(struct ubmad_agent *agent, + struct ubmad_recv_cr *recv_cr) +{ + /* Note: agent & recv_buf cannot be NULL, no need to check */ + struct ubcm_uvs_genl_node *uvs; + struct ubcm_nlmsg *nlmsg; + int ret; + + switch (recv_cr->msg_type) { + case UBMAD_CONN_DATA: + nlmsg = ubcm_alloc_genl_msg(recv_cr); + break; + case UBMAD_UBC_CONN_DATA: + ret = ubcore_cm_recv(agent->device, + (struct ubcore_cm_recv_cr *)recv_cr); + if (ret != 0) + ubcm_log_err( + "Failed to handle message by ubcore net, ret: %d.\n", + ret); + return ret; + case UBMAD_AUTHN_DATA: + nlmsg = ubcm_alloc_genl_authn_msg(recv_cr); + break; + default: + ubcm_log_err("Invalid msg_type: %u.\n", recv_cr->msg_type); + return -EINVAL; + } + + if (IS_ERR_OR_NULL(nlmsg)) + return -ENOMEM; + + uvs = ubcm_find_get_uvs_by_eid(&nlmsg->dst_eid); + if (uvs == NULL) { + ret = -1; + goto free_nlmsg; + } + + ret = ubcm_genl_unicast(nlmsg, ubcm_nlmsg_len(nlmsg), uvs); + if (ret != 0) + ubcm_log_err("Failed to send genl msg.\n"); + ubcm_uvs_kref_put(uvs); +free_nlmsg: + kfree(nlmsg); + return ret; +} + +static int ubcm_add_device(struct ubcore_device *device) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + struct ubcm_device *cm_dev; + int ret; + + cm_dev = kzalloc(sizeof(struct ubcm_device), GFP_KERNEL); + if (cm_dev == NULL) + return -ENOMEM; + + kref_init(&cm_dev->kref); + spin_lock_init(&cm_dev->agent_lock); + ret = ubcm_get_ubc_dev(device); + if (ret != 0) + goto put_dev; + cm_dev->device = device; + ubcore_set_client_ctx_data(device, &g_ubcm_client, cm_dev); + + /* Currently no send_handler needed */ + cm_dev->agent = ubmad_register_agent(device, ubcm_send_handler, + ubcm_recv_handler, (void *)cm_dev); + if (IS_ERR_OR_NULL(cm_dev->agent)) { + ubcm_log_err("Failed to register mad agent.\n"); + ret = PTR_ERR(cm_dev->agent); + goto put_dev; + } + + spin_lock(&cm_ctx->device_lock); + list_add_tail(&cm_dev->list_node, &cm_ctx->device_list); + spin_unlock(&cm_ctx->device_lock); + + return 0; +put_dev: + /* Note: cm_dev will free next */ + ubcm_put_device(cm_dev); + return ret; +} + +static void ubcm_remove_device(struct ubcore_device *device, void *client_ctx) +{ + struct ubcm_device *cm_dev = (struct ubcm_device *)client_ctx; + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + + if (cm_dev->device != device) { + ubcm_log_err("Invalid parameter.\n"); + return; + } + spin_lock(&cm_ctx->device_lock); + list_del(&cm_dev->list_node); + spin_unlock(&cm_ctx->device_lock); + + ubcm_put_device(cm_dev); +} + +void ubcm_work_handler(struct work_struct *work) +{ + struct ubcm_work *cm_work = container_of(work, struct ubcm_work, work); + struct ubmad_send_buf *send_buf = cm_work->send_buf; + struct ubmad_send_buf *bad_send_buf; + struct ubcm_device *cm_dev; + int ret; + + if (IS_ERR_OR_NULL(send_buf)) { + ubcm_log_err("Invalid parameter.\n"); + goto free_work; + } + + cm_dev = ubcm_find_get_device(&send_buf->src_eid); + if (IS_ERR_OR_NULL(cm_dev) || IS_ERR_OR_NULL(cm_dev->device)) { + ubcm_log_err("Failed to find ubcm device, src_eid: " EID_FMT + ".\n", + 
EID_ARGS(send_buf->src_eid)); + goto free_send_buf; + } + /* Source eid should be default eid0 for wk_jetty */ + send_buf->src_eid = cm_dev->device->eid_table.eid_entries[0].eid; + + ret = ubmad_post_send(cm_dev->device, send_buf, &bad_send_buf); + if (ret != 0) + ubcm_log_err("Failed to post send mad, ret: %d.\n", ret); + ubcm_put_device(cm_dev); + +free_send_buf: + kfree(send_buf); +free_work: + kfree(cm_work); +} + +static int ubcm_base_init(void) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + int ret; + + INIT_LIST_HEAD(&cm_ctx->device_list); + spin_lock_init(&cm_ctx->device_lock); + + cm_ctx->wq = alloc_workqueue(UBCM_MODULE_NAME, 0, 1); + if (IS_ERR_OR_NULL(cm_ctx->wq)) { + ubcm_log_err("Failed to alloc ubcm workqueue.\n"); + return -ENOMEM; + } + + ret = ubcore_register_client(&g_ubcm_client); + if (ret != 0) { + ubcm_log_err("Failed to register ubcm client, ret: %d.\n", ret); + destroy_workqueue(cm_ctx->wq); + cm_ctx->wq = NULL; + } + + return ret; +} + +static void ubcm_base_uninit(void) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + struct ubcm_device *cm_dev, *next; + + ubcore_unregister_client(&g_ubcm_client); + destroy_workqueue(cm_ctx->wq); + cm_ctx->wq = NULL; + + spin_lock(&cm_ctx->device_lock); + list_for_each_entry_safe(cm_dev, next, &cm_ctx->device_list, + list_node) { + list_del(&cm_dev->list_node); + ubcm_put_device(cm_dev); + } + spin_unlock(&cm_ctx->device_lock); +} + +struct ubcm_device *ubcm_find_get_device(union ubcore_eid *eid) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + struct ubcm_device *cm_dev, *next, *target = NULL; + struct ubcore_device *dev; + uint32_t idx; + + spin_lock(&cm_ctx->device_lock); + list_for_each_entry_safe(cm_dev, next, &cm_ctx->device_list, + list_node) { + dev = cm_dev->device; + spin_lock(&dev->eid_table.lock); + if (IS_ERR_OR_NULL(dev->eid_table.eid_entries)) { + spin_unlock(&dev->eid_table.lock); + continue; + } + for (idx = 0; idx < dev->attr.dev_cap.max_eid_cnt; idx++) { + if (memcmp(&dev->eid_table.eid_entries[idx].eid, eid, + sizeof(union ubcore_eid)) == 0) { + target = cm_dev; + (void)ubcm_get_device(target); + break; + } + } + spin_unlock(&dev->eid_table.lock); + if (target != NULL) + break; + } + spin_unlock(&cm_ctx->device_lock); + + return target; +} + +static int ubcm_cdev_create(void) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + int ret; + + ret = alloc_chrdev_region(&cm_ctx->ubcm_devno, 0, 1, UBCM_MODULE_NAME); + if (ret != 0) { + ubcm_log_err("Failed to alloc chrdev region, ret: %d.\n", ret); + return ret; + } + + /* create /sys/class/ubcm */ + ret = class_register(&g_ubcm_class); + if (ret != 0) { + ubcm_log_err("Failed to register ubcm class, ret: %d.\n", ret); + goto unreg_devno; + } + + cdev_init(&cm_ctx->ubcm_cdev, &g_ubcm_ops); + cm_ctx->ubcm_cdev.owner = THIS_MODULE; + + ret = cdev_add(&cm_ctx->ubcm_cdev, cm_ctx->ubcm_devno, 1); + if (ret != 0) { + ubcm_log_err("Failed to add ubcm chrdev, ret: %d.\n", ret); + goto unreg_class; + } + + /* create /dev/ubcm */ + cm_ctx->ubcm_dev = device_create(&g_ubcm_class, NULL, + cm_ctx->ubcm_devno, NULL, + UBCM_DEVICE_NAME); + if (IS_ERR_OR_NULL(cm_ctx->ubcm_dev)) { + ret = -1; + ubcm_log_err("Failed to create ubcm device, ret: %d.\n", + (int)PTR_ERR(cm_ctx->ubcm_dev)); + cm_ctx->ubcm_dev = NULL; + goto del_cdev; + } + + ubcm_log_info("Finish to create ubcm chrdev.\n"); + return 0; +del_cdev: + cdev_del(&cm_ctx->ubcm_cdev); +unreg_class: + class_unregister(&g_ubcm_class); +unreg_devno: + unregister_chrdev_region(cm_ctx->ubcm_devno, 1); + return ret; +} 
+ +static void ubcm_cdev_destroy(void) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + + device_destroy(&g_ubcm_class, cm_ctx->ubcm_cdev.dev); + cm_ctx->ubcm_dev = NULL; + cdev_del(&cm_ctx->ubcm_cdev); + class_unregister(&g_ubcm_class); + unregister_chrdev_region(cm_ctx->ubcm_devno, 1); +} + +int ubcm_init(void) +{ + int ret; + + ret = ubmad_init(); + if (ret != 0) { + ubcm_log_err("Failed to init ub_mad, ret: %d.\n", ret); + return ret; + } + + ret = ubcm_base_init(); + if (ret != 0) { + ubcm_log_err("Failed to init ubcm base, ret: %d.\n", ret); + goto uninit_mad; + } + + ret = ubcm_cdev_create(); + if (ret != 0) { + ubcm_log_err("Failed to create ubcm chrdev, ret: %d.\n", ret); + goto uninit_base; + } + + ret = ubcm_genl_init(); + if (ret != 0) { + ubcm_log_err("Failed to init ubcm generic netlink, ret: %d.\n", + ret); + goto destroy_cdev; + } + ubcore_register_cm_send_ops(ubmad_ubc_send); + + pr_info("ubcm module init success.\n"); + return 0; +destroy_cdev: + ubcm_cdev_destroy(); +uninit_base: + ubcm_base_uninit(); +uninit_mad: + ubmad_uninit(); + return ret; +} + +void ubcm_uninit(void) +{ + ubcm_genl_uninit(); + ubcm_cdev_destroy(); + ubcm_base_uninit(); + ubmad_uninit(); + pr_info("ubcm module exits.\n"); +} diff --git a/drivers/ub/urma/ubcore/ubcm/ub_cm.h b/drivers/ub/urma/ubcore/ubcm/ub_cm.h new file mode 100644 index 000000000000..97f63f70e1e9 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ub_cm.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * Description: ub_cm header + * Author: Chen Yutao + * Create: 2025-01-20 + * Note: + * History: 2025-01-20: Create file + */ + +#ifndef UB_CM_H +#define UB_CM_H + +#include +#include + + +#include "net/ubcore_cm.h" + +#include "ub_mad.h" +#include "ubcm_genl.h" + +struct ubcm_context { + struct list_head device_list; + spinlock_t device_lock; + struct workqueue_struct *wq; + dev_t ubcm_devno; + struct cdev ubcm_cdev; + struct device *ubcm_dev; +}; + +struct ubcm_work { + struct work_struct work; + struct ubmad_send_buf *send_buf; +}; + +struct ubcm_context *get_ubcm_ctx(void); + +/* Note: kref will increase of ubcm_device in this operation */ +struct ubcm_device *ubcm_find_get_device(union ubcore_eid *eid); + +void ubcm_work_handler(struct work_struct *work); + +int ubcm_init(void); +void ubcm_uninit(void); + +#endif /* UB_CM_H */ diff --git a/drivers/ub/urma/ubcore/ubcm/ub_mad.c b/drivers/ub/urma/ubcore/ubcm/ub_mad.c new file mode 100644 index 000000000000..6ea30e66311d --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ub_mad.c @@ -0,0 +1,1268 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * Description: ub_mad implementation + * Author: Chen Yutao + * Create: 2025-01-10 + * Note: + * History: 2025-01-10: create file + */ + +#include +#include + + + +#include "ubcore_tp.h" +#include "ubcm_log.h" + +#include "ub_mad_priv.h" + +// udma jetty id starts from 1 currently +#define WK_JETTY_ID_INITIALIZER \ + { \ + UBMAD_WK_JETTY_ID_0, UBMAD_WK_JETTY_ID_1 \ + } +static const uint32_t g_ubmad_wk_jetty_id[UBMAD_WK_JETTY_NUM] = + WK_JETTY_ID_INITIALIZER; + +static struct list_head g_ubmad_device_list; +static DEFINE_SPINLOCK(g_ubmad_device_list_lock); + +static struct list_head g_ubmad_agent_list; +static DEFINE_SPINLOCK(g_ubmad_agent_list_lock); +static DEFINE_MUTEX(g_ubc_eid_lock); + +/* forward declaration */ +// device +static int +ubmad_create_device_priv_resources(struct ubmad_device_priv *dev_priv); +static void +ubmad_destroy_device_priv_resources(struct ubmad_device_priv *dev_priv); +static struct ubmad_device_priv * +ubmad_get_device_priv_lockless(struct ubcore_device *device); + +/* common */ +struct ubmad_bitmap *ubmad_create_bitmap(uint32_t bitmap_size) +{ + struct ubmad_bitmap *bitmap; + + bitmap = kcalloc(1, sizeof(struct ubmad_bitmap), GFP_KERNEL); + if (IS_ERR_OR_NULL(bitmap)) + return ERR_PTR(-ENOMEM); + bitmap->size = bitmap_size; + bitmap->bits = kcalloc(BITS_TO_LONGS(bitmap_size), + sizeof(unsigned long), GFP_KERNEL); + if (IS_ERR_OR_NULL(bitmap->bits)) { + kfree(bitmap); + return ERR_PTR(-ENOMEM); + } + spin_lock_init(&bitmap->lock); + return bitmap; +} + +void ubmad_destroy_bitmap(struct ubmad_bitmap *bitmap) +{ + if (bitmap->bits != NULL) + kfree(bitmap->bits); + kfree(bitmap); +} + +uint32_t ubmad_bitmap_get_id(struct ubmad_bitmap *bitmap) +{ + uint32_t id; + + spin_lock(&bitmap->lock); + id = find_first_zero_bit(bitmap->bits, bitmap->size); + if (id >= bitmap->size) { + spin_unlock(&bitmap->lock); + ubcm_log_err("bitmap find zero bit failed\n"); + return id; + } + set_bit(id, bitmap->bits); + spin_unlock(&bitmap->lock); + return id; +} + +int ubmad_bitmap_put_id(struct ubmad_bitmap *bitmap, uint32_t id) +{ + spin_lock(&bitmap->lock); + if (id >= bitmap->size) { + spin_unlock(&bitmap->lock); + ubcm_log_err("invalid id %u\n", id); + return -EINVAL; + } + clear_bit(id, bitmap->bits); + spin_unlock(&bitmap->lock); + return 0; +} + +bool ubmad_bitmap_test_id(struct ubmad_bitmap *bitmap, uint32_t id) +{ + bool result; + + spin_lock(&bitmap->lock); + if (id >= bitmap->size) { + spin_unlock(&bitmap->lock); + ubcm_log_err("invalid id %u\n", id); + return false; + } + result = test_bit(id, bitmap->bits) == 0; + set_bit(id, bitmap->bits); + spin_unlock(&bitmap->lock); + + return result; +} + +int ubmad_bitmap_set_id(struct ubmad_bitmap *bitmap, uint32_t id) +{ + spin_lock(&bitmap->lock); + if (id >= bitmap->size) { + spin_unlock(&bitmap->lock); + ubcm_log_err("invalid id %u\n", id); + return -1; + } + + set_bit(id, bitmap->bits); + spin_unlock(&bitmap->lock); + return 0; +} + +/* ubcore event ops */ +// re-create dev resources when add eid after open dev +static void ubmad_event_cb(struct ubcore_event *event, + struct ubcore_event_handler *handler) +{ + switch (event->event_type) { + case UBCORE_EVENT_EID_CHANGE: + ubcm_log_info("No need to handle eid event.\n"); + break; + default: + ubcm_log_err("Invalid event_type: %d, dev_name: %s.\n", + event->event_type, event->ub_dev->dev_name); + break; + } +} + +static int ubmad_check_eid_in_dev(struct ubcore_device *dev, + struct ubcore_eid_info *eid_info) +{ + int i; + + spin_lock(&dev->eid_table.lock); + for (i = 0; i 
< dev->eid_table.eid_cnt; i++) { + if (memcmp(&dev->eid_table.eid_entries[i].eid, &eid_info->eid, + sizeof(union ubcore_eid)) == 0 && + dev->eid_table.eid_entries[i].eid_index == + eid_info->eid_index) { + spin_unlock(&dev->eid_table.lock); + return 0; + } + } + spin_unlock(&dev->eid_table.lock); + return -1; +} + +static int +ubmad_update_device_priv_resources(struct ubmad_device_priv *dev_priv, + struct ubcore_eid_info *eid_info) +{ + int ret; + + if (memcmp(&dev_priv->eid_info.eid, &eid_info->eid, + sizeof(union ubcore_eid)) == 0 && + dev_priv->eid_info.eid_index == eid_info->eid_index) { + ubcm_log_warn( + "eid_info is not changed, no need to update rsrc\n"); + return 0; + } + ubmad_destroy_device_priv_resources(dev_priv); + dev_priv->has_create_jetty_rsrc = false; + + (void)memcpy(&dev_priv->eid_info.eid, &eid_info->eid, + sizeof(union ubcore_eid)); + dev_priv->eid_info.eid_index = eid_info->eid_index; + ret = ubmad_create_device_priv_resources(dev_priv); + if (ret != 0) { + ubcm_log_err("Failed to create device resources, ret: %d.\n", + ret); + return ret; + } + dev_priv->has_create_jetty_rsrc = true; + ubcm_log_info( + "Success to update priv resources: dev: %s eid_idx %d, " EID_FMT, + dev_priv->device->dev_name, dev_priv->eid_info.eid_index, + EID_ARGS(dev_priv->eid_info.eid)); + return 0; +} + +static int ubmad_ubc_eid_ops_inner(struct ubcore_device *dev, + struct ubcore_eid_info *eid_info, + enum ubcore_mgmt_event_type event_type) +{ + struct ubmad_device_priv *dev_priv; + unsigned long flag; + int ret; + + spin_lock_irqsave(&g_ubmad_device_list_lock, flag); + dev_priv = ubmad_get_device_priv_lockless(dev); + spin_unlock_irqrestore(&g_ubmad_device_list_lock, flag); + if (IS_ERR_OR_NULL(dev_priv)) { + ubcm_log_err("Failed to get dev_priv, dev_name: %s\n", + dev->dev_name); + return -1; + } + switch (event_type) { + case UBCORE_MGMT_EVENT_EID_ADD: + if (dev_priv->has_create_jetty_rsrc) { + ret = ubmad_update_device_priv_resources(dev_priv, + eid_info); + if (ret != 0) + ubcm_log_err( + "Failed to update device resources, ret: %d.\n", + ret); + } else { + (void)memcpy(&dev_priv->eid_info.eid, &eid_info->eid, + sizeof(union ubcore_eid)); + dev_priv->eid_info.eid_index = eid_info->eid_index; + ret = ubmad_create_device_priv_resources(dev_priv); + if (ret != 0) + ubcm_log_err( + "Failed to create device resources, ret: %d.\n", + ret); + else + dev_priv->has_create_jetty_rsrc = true; + } + break; + case UBCORE_MGMT_EVENT_EID_RMV: + ubmad_destroy_device_priv_resources(dev_priv); + dev_priv->has_create_jetty_rsrc = false; + ret = 0; + break; + default: + ubcm_log_err("Invali event_type: %d.\n", event_type); + return -EINVAL; + } + ubcm_log_info("Finish to handle new eid, ret: %d, event_type: %d.\n", + ret, (int)event_type); + ubmad_put_device_priv(dev_priv); + return ret; +} + +// re-create dev resources when dispatch management event +static int ubmad_ubc_eid_ops(struct ubcore_device *dev, + struct ubcore_eid_info *eid_info, + enum ubcore_mgmt_event_type event_type) +{ + int ret; + + mutex_lock(&g_ubc_eid_lock); + if (ubmad_check_eid_in_dev(dev, eid_info) != 0) { + mutex_unlock(&g_ubc_eid_lock); + ubcm_log_err("Eid is not in dev, dev_name: %s, eid: " EID_FMT + ", eid_index: %u.\n", + dev->dev_name, EID_ARGS(eid_info->eid), + eid_info->eid_index); + return -1; + } + + ret = ubmad_ubc_eid_ops_inner(dev, eid_info, event_type); + if (ret != 0) { + mutex_unlock(&g_ubc_eid_lock); + ubcm_log_err("Failed to handle eid_ops_called, ret: %d.\n", + ret); + return ret; + } + 
mutex_unlock(&g_ubc_eid_lock); + return 0; +} + +/* jetty ops */ +static struct ubcore_jfc *ubmad_create_jfc_s(struct ubcore_device *device) +{ + struct ubcore_jfc_cfg jfc_cfg = { 0 }; + struct ubcore_jfc *jfc = NULL; + int rearm_ret; + + jfc_cfg.depth = UBMAD_JFS_DEPTH; + jfc = ubcore_create_jfc(device, &jfc_cfg, ubmad_jfce_handler_s, NULL, + NULL); + if (IS_ERR_OR_NULL(jfc)) { + ubcm_log_err("create jfc_s failed\n"); + return jfc; + } + + rearm_ret = ubcore_rearm_jfc(jfc, false); + if (rearm_ret != 0) { + ubcm_log_err("rearm jfc_s failed. ret %d\n", rearm_ret); + return NULL; + } + + return jfc; +} + +static struct ubcore_jfc *ubmad_create_jfc_r(struct ubcore_device *device) +{ + struct ubcore_jfc_cfg jfc_cfg = { 0 }; + struct ubcore_jfc *jfc = NULL; + int rearm_ret; + + jfc_cfg.depth = UBMAD_JFR_DEPTH; + jfc = ubcore_create_jfc(device, &jfc_cfg, ubmad_jfce_handler_r, NULL, + NULL); + if (IS_ERR_OR_NULL(jfc)) { + ubcm_log_err("create jfc_r failed\n"); + return jfc; + } + + rearm_ret = ubcore_rearm_jfc(jfc, false); + if (rearm_ret != 0) { + ubcm_log_err("rearm jfc_r failed. ret %d\n", rearm_ret); + return NULL; + } + + return jfc; +} + +static struct ubcore_jfr *ubmad_create_jfr(struct ubmad_device_priv *dev_priv, + struct ubcore_jfc *jfc_r) +{ + struct ubcore_jfr_cfg jfr_cfg = { 0 }; + + jfr_cfg.id = 0U; + jfr_cfg.depth = UBMAD_JFR_DEPTH; + jfr_cfg.flag.bs.token_policy = UBCORE_TOKEN_NONE; + jfr_cfg.trans_mode = UBCORE_TP_RM; + jfr_cfg.eid_index = dev_priv->eid_info.eid_index; + jfr_cfg.max_sge = UBMAD_JFR_MAX_SGE_NUM; + jfr_cfg.jfc = jfc_r; + + return ubcore_create_jfr(dev_priv->device, &jfr_cfg, NULL, NULL); +} + +static struct ubcore_jetty * +ubmad_create_jetty(struct ubmad_device_priv *dev_priv, struct ubcore_jfc *jfc_s, + struct ubcore_jfc *jfc_r, struct ubcore_jfr *jfr, + uint32_t jetty_id) +{ + struct ubcore_jetty_cfg jetty_cfg = { 0 }; + + jetty_cfg.id = jetty_id; + jetty_cfg.flag.bs.share_jfr = 1; + jetty_cfg.trans_mode = UBCORE_TP_RM; + jetty_cfg.eid_index = dev_priv->eid_info.eid_index; + jetty_cfg.jfs_depth = UBMAD_JFS_DEPTH; + jetty_cfg.priority = 0; /* Highest priority */ + jetty_cfg.max_send_sge = UBMAD_JFS_MAX_SGE_NUM; + jetty_cfg.max_send_rsge = UBMAD_JFS_MAX_SGE_NUM; + jetty_cfg.jfr_depth = UBMAD_JFR_DEPTH; + jetty_cfg.max_recv_sge = UBMAD_JFR_MAX_SGE_NUM; + jetty_cfg.send_jfc = jfc_s; + jetty_cfg.recv_jfc = jfc_r; + jetty_cfg.jfr = jfr; + jetty_cfg.err_timeout = UBMAD_JETTY_ERR_TIMEOUT; + + return ubcore_create_jetty(dev_priv->device, &jetty_cfg, NULL, NULL); +} + +static struct ubmad_tjetty * +ubmad_get_tjetty_lockless(struct ubmad_jetty_resource *rsrc, uint32_t hash, + union ubcore_eid *dst_eid) +{ + struct ubmad_tjetty *tjetty; + struct hlist_node *next; + + hlist_for_each_entry_safe(tjetty, next, &rsrc->tjetty_hlist[hash], + node) { + if (memcmp(&tjetty->tjetty->cfg.id.eid, dst_eid, + sizeof(union ubcore_eid)) == 0) { + kref_get(&tjetty->kref); + return tjetty; + } + } + return NULL; +} + +struct ubmad_tjetty *ubmad_get_tjetty(union ubcore_eid *dst_eid, + struct ubmad_jetty_resource *rsrc) +{ + unsigned long flag; + uint32_t hash = jhash(dst_eid, sizeof(union ubcore_eid), 0) % + UBMAD_MAX_TJETTY_NUM; + struct ubmad_tjetty *tjetty = NULL; + + spin_lock_irqsave(&rsrc->tjetty_hlist_lock, flag); + tjetty = ubmad_get_tjetty_lockless(rsrc, hash, dst_eid); + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); + + return tjetty; +} + +static void ubmad_release_tjetty(struct kref *kref) +{ + struct ubmad_tjetty *tjetty = + container_of(kref, struct ubmad_tjetty, kref); 
+ int ret; + + ubmad_uninit_msn_mgr(&tjetty->msn_mgr); + + ret = ubcore_unimport_jetty(tjetty->tjetty); + if (ret != 0) + ubcore_log_err("Failed to unimport jetty, ret: %d.\n", ret); + kfree(tjetty); +} + +void ubmad_put_tjetty(struct ubmad_tjetty *tjetty) +{ + kref_put(&tjetty->kref, ubmad_release_tjetty); +} + +static int ubmad_fill_get_tp_cfg(struct ubcore_device *dev, + struct ubcore_get_tp_cfg *get_tp_cfg, + struct ubcore_tjetty_cfg *cfg) +{ + uint32_t eid_index = cfg->eid_index; + + get_tp_cfg->flag.bs.ctp = 1; + get_tp_cfg->flag.bs.rtp = 0; + get_tp_cfg->flag.bs.utp = 0; + + get_tp_cfg->trans_mode = cfg->trans_mode; + + spin_lock(&dev->eid_table.lock); + if (eid_index >= dev->eid_table.eid_cnt || + dev->eid_table.eid_entries == NULL || + dev->eid_table.eid_entries[eid_index].valid == false) { + spin_unlock(&dev->eid_table.lock); + ubcore_log_err("Invalid parameter, eid_index: %u.\n", + eid_index); + return -EINVAL; + } + /* Need to adapt bonding primary eid */ + get_tp_cfg->local_eid = dev->eid_table.eid_entries[eid_index].eid; + spin_unlock(&dev->eid_table.lock); + get_tp_cfg->peer_eid = cfg->id.eid; + + return 0; +} + +static struct ubcore_tjetty * +ubmad_import_jetty_compat(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubcore_active_tp_cfg active_tp_cfg = { 0 }; + struct ubcore_get_tp_cfg get_tp_cfg = { 0 }; + struct ubcore_tp_info tp_list = { 0 }; + struct ubcore_tjetty *tjetty = NULL; + uint32_t tp_cnt = 1; + int ret; + + if (!ubcore_have_tp_ctrlplane_ops(dev) || + dev->ops->unimport_jfr == NULL || cfg == NULL || + dev->attr.dev_cap.max_eid_cnt <= cfg->eid_index) + return ERR_PTR(-EINVAL); + + if (ubmad_fill_get_tp_cfg(dev, &get_tp_cfg, cfg) != 0) + return NULL; + + ret = ubcore_get_tp_list(dev, &get_tp_cfg, &tp_cnt, &tp_list, NULL); + if (ret != 0 || tp_cnt != 1) { + ubcore_log_err("Failed to get tp list, ret: %d, tp_cnt: %u.\n", + ret, tp_cnt); + return NULL; + } + active_tp_cfg.tp_handle = tp_list.tp_handle; + ubcore_log_info("Finish to get tp, tpid: %u, tp_cnt: %u, leid: " EID_FMT + ", deid: " EID_FMT ".\n", + (uint32_t)tp_list.tp_handle.bs.tpid, + (uint32_t)tp_list.tp_handle.bs.tp_cnt, + EID_ARGS(get_tp_cfg.local_eid), + EID_ARGS(get_tp_cfg.peer_eid)); + + tjetty = ubcore_import_jetty_ex(dev, cfg, &active_tp_cfg, udata); + if (IS_ERR_OR_NULL(tjetty)) + ubcore_log_err("Failed to import jetty ex.\n"); + + return tjetty; +} + +/* need to put twice to release tjetty. + * First put for kref_get() is called by user after finish using tjetty locally. + * Second put for kref_init() is in ubmad_unimport_jetty(). + */ +struct ubmad_tjetty *ubmad_import_jetty(struct ubcore_device *device, + struct ubmad_jetty_resource *rsrc, + union ubcore_eid *dst_eid) +{ + unsigned long flag; + uint32_t hash = jhash(dst_eid, sizeof(union ubcore_eid), 0) % + UBMAD_MAX_TJETTY_NUM; + struct ubmad_tjetty *tjetty = NULL, *new_tjetty = NULL; + struct ubcore_tjetty *new_target = NULL; + struct ubcore_tjetty_cfg tjetty_cfg = { 0 }; + + /* get first */ + spin_lock_irqsave(&rsrc->tjetty_hlist_lock, flag); + tjetty = ubmad_get_tjetty_lockless(rsrc, hash, dst_eid); // put by user + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); + if (!IS_ERR_OR_NULL(tjetty)) { + ubcm_log_info("tjetty0 already imported. 
eid " EID_FMT "\n", + EID_ARGS(*dst_eid)); + return tjetty; + } + + /* not exist, import then */ + new_tjetty = kzalloc(sizeof(struct ubmad_tjetty), GFP_KERNEL); + if (IS_ERR_OR_NULL(new_tjetty)) + return ERR_PTR(-ENOMEM); + kref_init(&new_tjetty->kref); // put in ubmad_unimport_jetty() + + tjetty_cfg.id.id = rsrc->jetty_id; + tjetty_cfg.id.eid = *dst_eid; + tjetty_cfg.flag.bs.token_policy = UBCORE_TOKEN_NONE; + tjetty_cfg.trans_mode = UBCORE_TP_RM; + tjetty_cfg.type = UBCORE_JETTY; + tjetty_cfg.eid_index = rsrc->jetty->jetty_cfg.eid_index; + new_target = ubmad_import_jetty_compat(device, &tjetty_cfg, NULL); + if (IS_ERR_OR_NULL(new_target)) { + ubcm_log_err("import tjetty: %u failed. eid " EID_FMT "\n", + rsrc->jetty_id, EID_ARGS(*dst_eid)); + goto free; + } + new_tjetty->tjetty = new_target; + + ubmad_init_msn_mgr(&new_tjetty->msn_mgr); + + spin_lock_irqsave(&rsrc->tjetty_hlist_lock, flag); + /* get again in case of concurrence */ + tjetty = ubmad_get_tjetty_lockless(rsrc, hash, dst_eid); // put by user + if (!IS_ERR_OR_NULL(tjetty)) { + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); + ubcm_log_info( + "tjetty0 already imported. dev_name: %s, deid " EID_FMT + ".\n", + device->dev_name, EID_ARGS(*dst_eid)); + goto uninit_msn_mgr; + } + + /* still not exist, use new_tjetty */ + // kref_get for new imported tjetty to unify put logics with tjetty got from hlist + kref_get(&new_tjetty->kref); + + // add to hlist + INIT_HLIST_NODE(&new_tjetty->node); + hlist_add_head(&new_tjetty->node, &rsrc->tjetty_hlist[hash]); + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); + + ubcm_log_info( + "import tjetty0 and add to hlist succeeded. dev_name: %s, deid " EID_FMT + ".\n", + device->dev_name, EID_ARGS(*dst_eid)); + return new_tjetty; + +uninit_msn_mgr: + ubmad_uninit_msn_mgr(&new_tjetty->msn_mgr); + ubcore_unimport_jetty(new_target); +free: + kfree(new_tjetty); + return tjetty; +} + +static void ubmad_unimport_jetty(struct ubmad_tjetty *tjetty) +{ + ubmad_put_tjetty(tjetty); // second put for ubmad_import_jetty() +} + +void ubmad_remove_tjetty(union ubcore_eid *seid, + struct ubmad_jetty_resource *rsrc) +{ + uint32_t hash = + jhash(seid, sizeof(union ubcore_eid), 0) % UBMAD_MAX_TJETTY_NUM; + struct ubmad_tjetty *tjetty; + struct hlist_node *next; + unsigned long flag; + + ubcm_log_info("Remove tjetty, leid: " EID_FMT ", reid: " EID_FMT ".\n", + EID_ARGS(rsrc->jetty->jetty_id.eid), EID_ARGS(*seid)); + spin_lock_irqsave(&rsrc->tjetty_hlist_lock, flag); + hlist_for_each_entry_safe(tjetty, next, &rsrc->tjetty_hlist[hash], + node) { + if (memcmp(&tjetty->tjetty->cfg.id.eid, seid, + sizeof(union ubcore_eid)) == 0) { + hlist_del(&tjetty->node); + ubmad_unimport_jetty(tjetty); + } + } + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); +} + +/* seg ops */ +static struct ubcore_target_seg *ubmad_register_seg(struct ubcore_device *dev, + uint32_t num_sge) +{ + void *seg_va = NULL; + union ubcore_reg_seg_flag flag = { 0 }; + uint64_t seg_len = UBMAD_SGE_MAX_LEN * num_sge; + struct ubcore_seg_cfg cfg = { 0 }; + struct ubcore_target_seg *ret; + + seg_va = kzalloc(seg_len, GFP_KERNEL); + if (IS_ERR_OR_NULL(seg_va)) + return ERR_PTR(-ENOMEM); + flag.bs.token_policy = UBCORE_TOKEN_NONE; + flag.bs.cacheable = UBCORE_NON_CACHEABLE; + flag.bs.access = UBCORE_ACCESS_LOCAL_ONLY; + cfg.va = (uint64_t)seg_va; + cfg.len = seg_len; + cfg.flag = flag; + + ret = ubcore_register_seg(dev, &cfg, NULL); + if (IS_ERR_OR_NULL(ret)) { + ubcm_log_err("reg seg failed\n"); + goto free; + } + ubcm_log_info("Finish to 
register seg, va: 0x%llx, len: %llu", cfg.va, + seg_len); + return ret; + +free: + kfree(seg_va); + return ret; +} + +static void ubmad_unregister_seg(struct ubcore_target_seg *seg) +{ + uint64_t va = seg->seg.ubva.va; + + (void)ubcore_unregister_seg(seg); + kfree((void *)va); +} + +static int ubmad_create_seg(struct ubmad_jetty_resource *rsrc, + struct ubcore_device *device) +{ + // send_seg + rsrc->send_seg = ubmad_register_seg(device, UBMAD_SEND_SGE_NUM); + if (IS_ERR_OR_NULL(rsrc->send_seg)) { + ubcm_log_err("register send_seg failed.\n"); + return -1; + } + rsrc->send_seg_bitmap = ubmad_create_bitmap(UBMAD_SEND_SGE_NUM); + if (IS_ERR_OR_NULL(rsrc->send_seg_bitmap)) { + ubcm_log_err("alloc send_seg_bitmap failed\n"); + goto unreg_send_seg; + } + + // recv_seg + rsrc->recv_seg = ubmad_register_seg(device, UBMAD_RECV_SGE_NUM); + if (IS_ERR_OR_NULL(rsrc->recv_seg)) { + ubcm_log_err("register recv_seg failed\n"); + goto free_send_seg_bitmap; + } + rsrc->recv_seg_bitmap = ubmad_create_bitmap(UBMAD_RECV_SGE_NUM); + if (IS_ERR_OR_NULL(rsrc->recv_seg_bitmap)) { + ubcm_log_err("alloc recv_seg_bitmap failed\n"); + rsrc->recv_seg_bitmap = NULL; + goto unreg_recv_seg; + } + + return 0; + +unreg_recv_seg: + ubmad_unregister_seg(rsrc->recv_seg); + rsrc->recv_seg = NULL; +free_send_seg_bitmap: + ubmad_destroy_bitmap(rsrc->send_seg_bitmap); + rsrc->send_seg_bitmap = NULL; +unreg_send_seg: + ubmad_unregister_seg(rsrc->send_seg); + rsrc->send_seg = NULL; + return -1; +} + +static void ubmad_destroy_seg(struct ubmad_jetty_resource *rsrc) +{ + ubmad_destroy_bitmap(rsrc->recv_seg_bitmap); + rsrc->recv_seg_bitmap = NULL; + ubmad_unregister_seg(rsrc->recv_seg); + rsrc->recv_seg = NULL; + + ubmad_destroy_bitmap(rsrc->send_seg_bitmap); + rsrc->send_seg_bitmap = NULL; + ubmad_unregister_seg(rsrc->send_seg); + rsrc->send_seg = NULL; +} + +/* jetty rsrc */ +static int ubmad_init_jetty_rsrc(struct ubmad_jetty_resource *rsrc, + struct ubmad_device_priv *dev_priv) +{ + struct ubcore_device *device = dev_priv->device; + struct ubcore_jetty *jetty; + struct ubcore_jfc *jfc_s; + struct ubcore_jfc *jfc_r; + struct ubcore_jfr *jfr; + uint32_t idx; + int ret; + + /* create jetty */ + jfc_s = ubmad_create_jfc_s(device); + if (IS_ERR_OR_NULL(jfc_s)) { + ubcm_log_err("fail to create jfc_s. dev_name: %s\n", + device->dev_name); + return -1; + } + rsrc->jfc_s = jfc_s; + + jfc_r = ubmad_create_jfc_r(device); + if (IS_ERR_OR_NULL(jfc_r)) { + ubcm_log_err("fail to create jfc_r. dev_name: %s\n", + device->dev_name); + ret = -1; + goto del_jfc_s; + } + rsrc->jfc_r = jfc_r; + + jfr = ubmad_create_jfr(dev_priv, jfc_r); + if (IS_ERR_OR_NULL(jfr)) { + ubcm_log_err("fail to create jfr. dev_name: %s\n", + device->dev_name); + ret = -1; + goto del_jfc_r; + } + + jetty = ubmad_create_jetty(dev_priv, jfc_s, jfc_r, jfr, rsrc->jetty_id); + if (IS_ERR_OR_NULL(jetty)) { + ubcm_log_err("fail to create wk jetty. dev_name: %s, id: %u.\n", + device->dev_name, rsrc->jetty_id); + ret = -1; + goto del_jfr; + } + atomic_set(&rsrc->tx_in_queue, 0); + + ubcm_log_info("well-known jetty id %u eid " EID_FMT ", jfr id: %u.\n", + jetty->jetty_id.id, EID_ARGS(jetty->jetty_id.eid), + jfr->jfr_id.id); + rsrc->jetty = jetty; + + /* create seg */ + ret = ubmad_create_seg(rsrc, device); + if (ret != 0) { + ubcm_log_err("create seg failed. device %s.\n", + device->dev_name); + goto del_jetty; + } + + /* first batch of post_recv */ + for (idx = 0; idx < UBMAD_JFR_DEPTH; idx++) { + ret = ubmad_post_recv(rsrc); + if (ret != 0) { + ubcm_log_err( + "No. 
%u post recv in the first batch failed. device %s ret %d\n", + idx, device->dev_name, ret); + goto destroy_seg; + } + } + + /* tjetty */ + for (idx = 0; idx < UBMAD_MAX_TJETTY_NUM; idx++) + INIT_HLIST_HEAD(&rsrc->tjetty_hlist[idx]); + spin_lock_init(&rsrc->tjetty_hlist_lock); + + /* reliable communication */ + ubmad_init_seid_hlist(rsrc); + + return 0; +destroy_seg: + ubmad_destroy_seg(rsrc); +del_jetty: + (void)ubcore_delete_jetty(jetty); +del_jfr: + (void)ubcore_delete_jfr(jfr); +del_jfc_r: + (void)ubcore_delete_jfc(jfc_r); +del_jfc_s: + (void)ubcore_delete_jfc(jfc_s); + return ret; +} + +static void ubmad_uninit_jetty_rsrc(struct ubmad_jetty_resource *rsrc) +{ + struct ubmad_tjetty *tjetty; + struct hlist_node *next; + unsigned long flag; + int i; + + /* reliable communication */ + ubmad_uninit_seid_hlist(rsrc); + + /* tjetty */ + spin_lock_irqsave(&rsrc->tjetty_hlist_lock, flag); + for (i = 0; i < UBMAD_MAX_TJETTY_NUM; i++) { + hlist_for_each_entry_safe(tjetty, next, &rsrc->tjetty_hlist[i], + node) { + hlist_del(&tjetty->node); + ubmad_unimport_jetty(tjetty); + } + } + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); + + ubmad_destroy_seg(rsrc); + (void)ubcore_delete_jetty(rsrc->jetty); + rsrc->jetty = NULL; + + (void)ubcore_delete_jfr(rsrc->jfr); + rsrc->jfr = NULL; + + (void)ubcore_delete_jfc(rsrc->jfc_r); + rsrc->jfc_r = NULL; + + (void)ubcore_delete_jfc(rsrc->jfc_s); + rsrc->jfc_s = NULL; +} + +static int ubmad_init_jetty_rsrc_array(struct ubmad_jetty_resource *rsrc_array, + struct ubmad_device_priv *dev_priv) +{ + int i, j; + int ret; + + for (i = 0; i < UBMAD_WK_JETTY_NUM; i++) { + rsrc_array[i].jetty_id = g_ubmad_wk_jetty_id[i]; + ret = ubmad_init_jetty_rsrc(&rsrc_array[i], dev_priv); + if (ret != 0) { + ubcm_log_err( + "Failed to init jetty rsrc, index: %d, ret: %d.\n", + i, ret); + goto uninit_rsrc; + } + } + ubcm_log_info("Finish to init jetty resource.\n"); + + return 0; +uninit_rsrc: + for (j = 0; j < i; j++) + ubmad_uninit_jetty_rsrc(&rsrc_array[j]); + return ret; +} + +static void +ubmad_uninit_jetty_rsrc_array(struct ubmad_jetty_resource *rsrc_array) +{ + int i; + + for (i = 0; i < UBMAD_WK_JETTY_NUM; i++) + ubmad_uninit_jetty_rsrc(&rsrc_array[i]); +} + +struct ubmad_jetty_resource * +ubmad_get_jetty_rsrc_by_jfc_s(struct ubmad_device_priv *dev_priv, + struct ubcore_jfc *jfc_s) +{ + int i; + + /* No need to lock as dev_priv kref_put, so jetty resources are valid */ + for (i = 0; i < UBMAD_WK_JETTY_NUM; i++) { + if (dev_priv->jetty_rsrc[i].jfc_s == jfc_s) + return &dev_priv->jetty_rsrc[i]; + } + + return NULL; +} + +struct ubmad_jetty_resource * +ubmad_get_jetty_rsrc_by_jfc_r(struct ubmad_device_priv *dev_priv, + struct ubcore_jfc *jfc_r) +{ + int i; + + /* No need to lock as dev_priv kref_put, so jetty resources are valid */ + for (i = 0; i < UBMAD_WK_JETTY_NUM; i++) { + if (dev_priv->jetty_rsrc[i].jfc_r == jfc_r) + return &dev_priv->jetty_rsrc[i]; + } + + return NULL; +} + +/* device */ +static int +ubmad_create_device_priv_resources(struct ubmad_device_priv *dev_priv) +{ + struct ubcore_device *device = dev_priv->device; + struct ubcore_eid_info *eid_list = NULL; + uint32_t cnt = 0; + int ret; + + /* check */ + if (dev_priv->valid) { + ubcm_log_warn("dev_priv rsrc already inited. 
dev_name: %s\n", + device->dev_name); + return 0; + } + + eid_list = ubcore_get_eid_list(device, &cnt); + if (eid_list == NULL || cnt == 0) { + ubcm_log_warn( + "No eid_list in device: %s, do not create wk-jetty resource.\n", + device->dev_name); + return 0; + } + + ret = ubmad_init_jetty_rsrc_array(dev_priv->jetty_rsrc, dev_priv); + if (ret != 0) { + ubcm_log_err("Failed to init jetty rsrc array, ret: %d.\n", + ret); + return ret; + } + + dev_priv->valid = true; + return 0; +} + +static void +ubmad_destroy_device_priv_resources(struct ubmad_device_priv *dev_priv) +{ + if (!dev_priv->valid) { + ubcm_log_warn( + "dev_priv rsrc not inited. No need to uninit. dev_name: %s\n", + dev_priv->device->dev_name); + return; + } + dev_priv->valid = false; + + ubmad_uninit_jetty_rsrc_array(dev_priv->jetty_rsrc); +} + +static struct ubmad_device_priv * +ubmad_get_device_priv_lockless(struct ubcore_device *device) +{ + struct ubmad_device_priv *dev_priv, *next; + + list_for_each_entry_safe(dev_priv, next, &g_ubmad_device_list, node) { + if (dev_priv->device == device) { + kref_get(&dev_priv->kref); + return dev_priv; + } + } + return NULL; +} + +struct ubmad_device_priv *ubmad_get_device_priv(struct ubcore_device *device) +{ + struct ubmad_device_priv *dev_priv; + unsigned long flag; + + spin_lock_irqsave(&g_ubmad_device_list_lock, flag); + dev_priv = ubmad_get_device_priv_lockless(device); + spin_unlock_irqrestore(&g_ubmad_device_list_lock, flag); + + return dev_priv; +} + +static void ubmad_release_device_priv(struct kref *kref) +{ + struct ubmad_device_priv *dev_priv = + container_of(kref, struct ubmad_device_priv, kref); + + /* retransmission */ + flush_workqueue(dev_priv->rt_wq); + destroy_workqueue(dev_priv->rt_wq); + + /* rsrc */ + ubmad_destroy_device_priv_resources(dev_priv); + + /* basic */ + ubcore_unregister_event_handler(dev_priv->device, &dev_priv->handler); + kfree(dev_priv); +} + +void ubmad_put_device_priv(struct ubmad_device_priv *dev_priv) +{ + kref_put(&dev_priv->kref, ubmad_release_device_priv); +} + +// init dev_priv rsrc fail won't cause this func ret err +static int ubmad_open_device(struct ubcore_device *device) +{ + struct ubmad_device_priv *dev_priv; + unsigned long flag; + + /* basic */ + dev_priv = kzalloc(sizeof(struct ubmad_device_priv), GFP_KERNEL); + if (dev_priv == NULL) + return -ENOMEM; + kref_init(&dev_priv->kref); + dev_priv->device = device; + dev_priv->handler.event_callback = ubmad_event_cb; + ubcore_register_event_handler(device, &dev_priv->handler); + + /* rsrc */ + if (ubmad_create_device_priv_resources(dev_priv) != 0) { + // It could be due to eid not added. Wait for ubcore add eid event to init again. + ubcm_log_warn("fail to create dev_priv rsrc. dev_name: %s\n", + device->dev_name); + } + + /* reliable communication */ + dev_priv->rt_wq = create_workqueue("ubmad rt_wq"); + if (IS_ERR_OR_NULL(dev_priv->rt_wq)) { + ubcm_log_err("create rt_wq failed. 
dev_name: %s\n", + device->dev_name); + ubmad_destroy_device_priv_resources(dev_priv); + ubcore_unregister_event_handler(dev_priv->device, + &dev_priv->handler); + kfree(dev_priv); + return -1; + } + + /* add to list */ + spin_lock_irqsave(&g_ubmad_device_list_lock, flag); + list_add_tail(&dev_priv->node, &g_ubmad_device_list); + spin_unlock_irqrestore(&g_ubmad_device_list_lock, flag); + + return 0; +} + +static void ubmad_rsrc_notify_close(struct ubmad_jetty_resource *rsrc) +{ + struct ubmad_tjetty *tjetty; + struct hlist_node *next; + unsigned long flag; + int i; + + if (IS_ERR_OR_NULL(rsrc->jetty) || IS_ERR_OR_NULL(rsrc->send_seg) || + IS_ERR_OR_NULL(rsrc->send_seg_bitmap)) { + ubcm_log_warn("Invalid parameter.\n"); + return; + } + + spin_lock_irqsave(&rsrc->tjetty_hlist_lock, flag); + for (i = 0; i < UBMAD_MAX_TJETTY_NUM; i++) { + hlist_for_each_entry_safe(tjetty, next, &rsrc->tjetty_hlist[i], + node) + ubmad_post_send_close_req(rsrc, tjetty->tjetty); + } + spin_unlock_irqrestore(&rsrc->tjetty_hlist_lock, flag); +} + +// send close request to all tjetty before remove kmod +static void ubmad_notify_close(struct ubcore_device *device) +{ + struct ubmad_device_priv *dev_priv; + unsigned long flag; + int i; + + spin_lock_irqsave(&g_ubmad_device_list_lock, flag); + dev_priv = ubmad_get_device_priv_lockless(device); + spin_unlock_irqrestore(&g_ubmad_device_list_lock, flag); + if (dev_priv == NULL) { + ubcm_log_err("Failed to get dev_priv, dev_name: %s\n", + device->dev_name); + return; + } + + for (i = 0; i < UBMAD_WK_JETTY_NUM; i++) + ubmad_rsrc_notify_close(&dev_priv->jetty_rsrc[i]); + + ubmad_put_device_priv(dev_priv); +} + +static int ubmad_close_device(struct ubcore_device *device) +{ + struct ubmad_device_priv *dev_priv; + unsigned long flag; + + /* remove from list */ + spin_lock_irqsave(&g_ubmad_device_list_lock, flag); + dev_priv = ubmad_get_device_priv_lockless(device); + if (dev_priv == NULL) { + spin_unlock_irqrestore(&g_ubmad_device_list_lock, flag); + ubcm_log_err("Failed to get dev_priv, dev_name: %s\n", + device->dev_name); + return -ENODEV; + } + list_del(&dev_priv->node); + spin_unlock_irqrestore(&g_ubmad_device_list_lock, flag); + ubmad_put_device_priv(dev_priv); // put for get above + + /* left triggered by put */ + ubmad_put_device_priv( + dev_priv); // put for kref_init() in ubmad_open_device() + return 0; +} + +static int ubmad_add_device(struct ubcore_device *device) +{ + /* Use main device, do not use namespace logic device */ + int ret; + + /* open dev */ + ret = ubmad_open_device(device); + if (ret != 0) { + ubcm_log_err( + "fail to open mad device, dev_name: %s, ret: %d.\n", + device->dev_name, ret); + return ret; + } + + return 0; +} + +static void ubmad_remove_device(struct ubcore_device *device, void *client_ctx) +{ + int ret; + + ubmad_notify_close(device); + + ret = ubmad_close_device(device); + if (ret != 0) + ubcm_log_err("Failed to close ubmad device, dev_name: %s.\n", + device->dev_name); +} + +static struct ubcore_client g_ubmad_client = { + .list_node = LIST_HEAD_INIT(g_ubmad_client.list_node), + .client_name = "ubmad", + .add = ubmad_add_device, + .remove = ubmad_remove_device +}; + +int ubmad_init(void) +{ + int ret; + + INIT_LIST_HEAD(&g_ubmad_device_list); + INIT_LIST_HEAD(&g_ubmad_agent_list); + + ret = ubcore_register_client(&g_ubmad_client); + if (ret != 0) { + ubcm_log_err("Failed to register ub_mad client, ret: %d.\n", + ret); + return ret; + } + + ubcore_register_cm_eid_ops(ubmad_ubc_eid_ops); + + return 0; +} + +void ubmad_uninit(void) +{ + 
ubcore_unregister_client(&g_ubmad_client); +} + +/* agent ops */ +static struct ubmad_agent_priv * +ubmad_get_agent_priv_lockless(struct ubcore_device *device) +{ + struct ubmad_agent_priv *agent_priv, *next; + + list_for_each_entry_safe(agent_priv, next, &g_ubmad_agent_list, node) { + if (agent_priv->agent.device == device) { + kref_get(&agent_priv->kref); + return agent_priv; + } + } + + return NULL; +} + +struct ubmad_agent_priv *ubmad_get_agent_priv(struct ubcore_device *device) +{ + struct ubmad_agent_priv *agent_priv; + unsigned long flag; + + spin_lock_irqsave(&g_ubmad_agent_list_lock, flag); + agent_priv = ubmad_get_agent_priv_lockless(device); + spin_unlock_irqrestore(&g_ubmad_agent_list_lock, flag); + + return agent_priv; +} + +static void ubmad_release_agent_priv(struct kref *kref) +{ + struct ubmad_agent_priv *agent_priv = + container_of(kref, struct ubmad_agent_priv, kref); + + flush_workqueue(agent_priv->jfce_wq); + destroy_workqueue(agent_priv->jfce_wq); + + kfree(agent_priv); +} + +void ubmad_put_agent_priv(struct ubmad_agent_priv *agent_priv) +{ + kref_put(&agent_priv->kref, ubmad_release_agent_priv); +} + +struct ubmad_agent *ubmad_register_agent(struct ubcore_device *device, + ubmad_send_handler send_handler, + ubmad_recv_handler recv_handler, + void *usr_ctx) +{ + struct ubmad_agent *agent; + struct ubmad_agent_priv *agent_priv; + unsigned long flag; + + /* check inputs */ + if (IS_ERR_OR_NULL(device)) { + ubcm_log_err("device nullptr\n"); + return ERR_PTR(-EINVAL); + } + if (IS_ERR_OR_NULL(send_handler)) + ubcm_log_warn("send_handler null\n"); + if (IS_ERR_OR_NULL(recv_handler)) + ubcm_log_warn("recv_handler null\n"); + + /* create agent_priv */ + agent_priv = kzalloc(sizeof(struct ubmad_agent_priv), GFP_KERNEL); + if (IS_ERR_OR_NULL(agent_priv)) + return ERR_PTR(-ENOMEM); + kref_init(&agent_priv->kref); + + agent_priv->jfce_wq = create_workqueue("ubmad jfce_wq"); + if (IS_ERR_OR_NULL(agent_priv->jfce_wq)) { + ubcm_log_err("create agent_priv workqueue failed.\n"); + kfree(agent_priv); + return NULL; + } + + /* init agent */ + agent = &agent_priv->agent; + agent->device = device; + agent->send_handler = send_handler; + agent->recv_handler = recv_handler; + agent->usr_ctx = usr_ctx; + + /* add to list */ + INIT_LIST_HEAD(&agent_priv->node); + spin_lock_irqsave(&g_ubmad_agent_list_lock, flag); + list_add_tail(&agent_priv->node, &g_ubmad_agent_list); + spin_unlock_irqrestore(&g_ubmad_agent_list_lock, flag); + + return agent; +} + +int ubmad_unregister_agent(struct ubmad_agent *agent) +{ + unsigned long flag; + struct ubmad_agent_priv *agent_priv; + + if (IS_ERR_OR_NULL(agent)) { + ubcm_log_err("agent nullptr\n"); + return -EINVAL; + } + + /* remove from list */ + agent_priv = container_of(agent, struct ubmad_agent_priv, agent); + spin_lock_irqsave(&g_ubmad_agent_list_lock, flag); + list_del(&agent_priv->node); + spin_unlock_irqrestore(&g_ubmad_agent_list_lock, flag); + + ubmad_put_agent_priv(agent_priv); // put for kref_init() + + return 0; +} diff --git a/drivers/ub/urma/ubcore/ubcm/ub_mad.h b/drivers/ub/urma/ubcore/ubcm/ub_mad.h new file mode 100644 index 000000000000..f19095847c7e --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ub_mad.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * Description: ub_mad header, only including Northbound API + * Author: Chen Yutao + * Create: 2025-01-10 + * Note: + * History: 2025-01-10: Create file + */ + +#ifndef UB_MAD_H +#define UB_MAD_H + + +#include "net/ubcore_cm.h" + +/* agent */ +#define UBMAD_SGE_MAX_LEN 2048 // cm data max len + +enum ubmad_msg_type { + UBMAD_CONN_DATA = 0, + UBMAD_CONN_ACK, + UBMAD_UBC_CONN_DATA = UBCORE_CM_CONN_MSG, + UBMAD_UBC_CONN_ACK, + UBMAD_AUTHN_DATA = 0x10, + UBMAD_AUTHN_ACK, + // cm send close request to all tjetty before remove kmod, one-way notification + UBMAD_CLOSE_REQ = 0x20, +}; + +struct ubmad_send_buf { + union ubcore_eid src_eid; + union ubcore_eid dst_eid; + + enum ubmad_msg_type msg_type; + uint32_t payload_len; + uint8_t payload[]; +}; + +/* callbacks for cm in ubmad_jfce_handler */ +struct ubmad_send_cr { + struct ubcore_cr *cr; +}; + +struct ubmad_recv_cr { + struct ubcore_cr *cr; + + // remote eid see cr->remote_id.eid + union ubcore_eid local_eid; + + enum ubmad_msg_type msg_type; + uint64_t payload; + uint32_t payload_len; // != cr->completion_len, latter including msg header size +}; + +struct ubmad_agent; +typedef int (*ubmad_send_handler)(struct ubmad_agent *agent, + struct ubmad_send_cr *cr); +typedef int (*ubmad_recv_handler)(struct ubmad_agent *agent, + struct ubmad_recv_cr *cr); +struct ubmad_agent { + struct ubcore_device *device; + ubmad_send_handler send_handler; + ubmad_recv_handler recv_handler; + void *usr_ctx; +}; + +int ubmad_init(void); +void ubmad_uninit(void); + +struct ubmad_agent *ubmad_register_agent(struct ubcore_device *device, + ubmad_send_handler send_handler, + ubmad_recv_handler recv_handler, + void *usr_ctx); +int ubmad_unregister_agent(struct ubmad_agent *agent); + +int ubmad_post_send(struct ubcore_device *device, + struct ubmad_send_buf *send_buf, + struct ubmad_send_buf **bad_send_buf); +int ubmad_ubc_send(struct ubcore_device *device, + struct ubcore_cm_send_buf *send_buf); + +#endif /* UB_MAD_H */ diff --git a/drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h b/drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h new file mode 100644 index 000000000000..e12b3b79d6ad --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h @@ -0,0 +1,261 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * Description: ub_mad private header + * Author: Chen Yutao + * Create: 2025-01-13 + * Note: + * History: 2025-01-13: Create file + */ + +#ifndef UB_MAD_PRIV_H +#define UB_MAD_PRIV_H + +#include + +#include "ub_mad.h" + +/* well-known jetty (wk jetty) parameters */ +#define UBMAD_WK_JETTY_NUM 2 /* well-known jetty 0 and 1 only used in ubcm */ +#define UBMAD_WK_JETTY_ID_0 1U +#define UBMAD_WK_JETTY_ID_1 2U + +// jetty +#define UBMAD_JFS_DEPTH 512U +#define UBMAD_JFR_DEPTH 1024U +#define UBMAD_JFS_MAX_SGE_NUM 1 +#define UBMAD_JFR_MAX_SGE_NUM 1 +#define UBMAD_JETTY_ERR_TIMEOUT 17 + +// seg +#define UBMAD_SEND_SGE_NUM (UBMAD_JFS_DEPTH * 2) +#define UBMAD_RECV_SGE_NUM (UBMAD_JFR_DEPTH * 2) + +// tjetty +#define UBMAD_MAX_TJETTY_NUM 10240 + +/* datapath */ +#define UBMAD_MSG_VERSION_0 0 /* current version */ + +// reliable communication +#define UBMAD_MSN_HLIST_SIZE 1024 +#define UBMAD_MAX_SEID_NUM 1024 + +#define UBMAD_RETRANSMIT_MS 500 +#define UBMAD_RETRANSMIT_PERIOD msecs_to_jiffies(UBMAD_RETRANSMIT_MS) + +#define UBMAD_MAX_RETRY_CNT 4 +#define UBMAD_RX_BITMAP_SIZE 1024 + +#define UBMAD_TX_THREDSHOLD (UBMAD_JFS_DEPTH - 8) + +/* common */ +struct ubmad_bitmap { + unsigned long *bits; + uint32_t size; + uint64_t right_end; /* Only for RX side */ + spinlock_t lock; +}; + +struct ubmad_msn_mgr { + atomic64_t msn_gen; // msn generator, increased with each post_send + + // msn_hlist holds msn that posted but not ack yet. key: msn, val: msn_node + struct hlist_head msn_hlist[UBMAD_MSN_HLIST_SIZE]; // ubmad_msn_node + spinlock_t msn_hlist_lock; +}; + +/* jetty */ +struct ubmad_jetty_resource { + /* jetty */ + uint32_t jetty_id; + struct ubcore_jfc *jfc_s; // send + struct ubcore_jfc *jfc_r; // recv + struct ubcore_jfr *jfr; + struct ubcore_jetty *jetty; /* well-known jetty */ + atomic_t tx_in_queue; + + /* seg */ + // each post uses one sge in the seg + // send + struct ubcore_target_seg *send_seg; + struct ubmad_bitmap *send_seg_bitmap; + // recv + struct ubcore_target_seg *recv_seg; + struct ubmad_bitmap *recv_seg_bitmap; + + /* tjetty */ + // key: ubcore_eid dst_eid, val: ubmad_tjetty tjetty + struct hlist_head tjetty_hlist[UBMAD_MAX_TJETTY_NUM]; + spinlock_t tjetty_hlist_lock; + + /* reliable communication */ + // source eid hlist, only for target. key: src eid, val: seid_node. + struct hlist_head seid_hlist[UBMAD_MAX_SEID_NUM]; // ubmad_seid_node + spinlock_t seid_hlist_lock; +}; + +struct ubmad_tjetty { + struct ubcore_tjetty *tjetty; + struct kref kref; + struct hlist_node node; // ubmad_device_priv.tjetty_hlist + + /* reliable communication */ + struct ubmad_msn_mgr msn_mgr; // for retransmit, only for initiator +}; + +/* device */ +// device contains resources used inside ubmad, including jetty, seg and etc. +struct ubmad_device_priv { + struct ubcore_device *device; + struct kref kref; + struct list_head node; // g_ubmad_device_list + struct ubcore_event_handler handler; + + /** resources **/ + bool valid; // following resources inited or not + struct ubmad_jetty_resource + jetty_rsrc[UBMAD_WK_JETTY_NUM]; // well-known jetty resource + /** end of resources **/ + + /* reliable communication */ + struct workqueue_struct + *rt_wq; // retransmit work queue, only for initiator + struct ubcore_eid_info eid_info; + bool has_create_jetty_rsrc; +}; + +/* agent */ +// agent contains resources used between ubmad and ubcm. 
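+// Lifetime: allocated in ubmad_register_agent() and freed through its kref.
+// ubmad_get_agent_priv() takes a reference under g_ubmad_agent_list_lock,
+// ubmad_put_agent_priv() drops one, and ubmad_unregister_agent() unlinks the
+// node and drops the initial reference; the release callback flushes and
+// destroys jfce_wq before freeing the node.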
+struct ubmad_agent_priv { + struct ubmad_agent agent; + struct kref kref; + struct list_head node; // g_ubmad_agent_list + struct workqueue_struct *jfce_wq; // ubmad_jfce_work +}; + +/** datapath **/ +/* msg */ +/* + * 1. considering 8B alignment, layout is not logical. + * 2. msg is stored in sge rather than alloc. + */ +struct ubmad_msg { + uint8_t version; + uint8_t msg_type; // ubmad_msg_type + uint16_t payload_len; + uint32_t reserved; // reserved for 8B aligned + + uint64_t msn; // Message sequence number + + uint8_t payload[]; +}; + +/* poll */ +/* + * msg not processed right after poll jfc in ubmad_jfce_handler(), but tranformed to ubmad_jfce_work + * and left for workqueue. + */ +enum ubmad_jfce_work_type { UBMAD_SEND_WORK, UBMAD_RECV_WORK }; + +struct ubmad_jfce_work { + struct work_struct work; // ubmad_agent_priv.jfce_wq + enum ubmad_jfce_work_type type; + + struct ubcore_jfc *jfc; + struct ubmad_agent_priv *agent_priv; +}; + +/** reliable communication **/ +/* for initiator (data sender, ack recver) */ +// add msn_node to msn_hlist when post and remove when recv ack +struct ubmad_msn_node { + struct hlist_node node; // ubmad_msn_mgr.msn_hlist + uint64_t msn; +}; + +/* try to find msn_node in msn_hlist when timeout. If find, repost and re-add work, + * else indicating already ack, free work. + */ +struct ubmad_rt_work { + struct delayed_work delay_work; // ubmad_device_priv.rt_wq + + uint64_t msn; + uint32_t rt_cnt; /* Retry count, no larger than UBMAD_MAX_RETRY_CNT */ + struct ubmad_msn_mgr *msn_mgr; + + struct ubmad_msg *msg; + struct ubmad_tjetty *tjetty; + struct ubmad_jetty_resource *rsrc; + struct workqueue_struct *rt_wq; +}; + +/* for target (data recver, ack sender) */ +struct ubmad_seid_node { + struct hlist_node node; // ubmad_device_priv.seid_hlist + union ubcore_eid seid; + struct kref kref; + + atomic64_t expected_msn; + struct ubmad_bitmap *rx_bitmap; +}; + +/* common */ +struct ubmad_bitmap *ubmad_create_bitmap(uint32_t bitmap_size); +void ubmad_destroy_bitmap(struct ubmad_bitmap *bitmap); +uint32_t ubmad_bitmap_get_id(struct ubmad_bitmap *bitmap); +int ubmad_bitmap_put_id(struct ubmad_bitmap *bitmap, uint32_t id); +bool ubmad_bitmap_test_id(struct ubmad_bitmap *bitmap, uint32_t id); +int ubmad_bitmap_set_id(struct ubmad_bitmap *bitmap, uint32_t id); + +/* jetty */ +struct ubmad_tjetty *ubmad_get_tjetty(union ubcore_eid *dst_eid, + struct ubmad_jetty_resource *rsrc); +void ubmad_put_tjetty(struct ubmad_tjetty *tjetty); + +struct ubmad_tjetty *ubmad_import_jetty(struct ubcore_device *device, + struct ubmad_jetty_resource *rsrc, + union ubcore_eid *dst_eid); + +struct ubmad_jetty_resource * +ubmad_get_jetty_rsrc_by_jfc_s(struct ubmad_device_priv *dev_priv, + struct ubcore_jfc *jfc_s); +struct ubmad_jetty_resource * +ubmad_get_jetty_rsrc_by_jfc_r(struct ubmad_device_priv *dev_priv, + struct ubcore_jfc *jfc_r); + +void ubmad_remove_tjetty(union ubcore_eid *seid, + struct ubmad_jetty_resource *rsrc); + +/* device */ +struct ubmad_device_priv *ubmad_get_device_priv(struct ubcore_device *device); +void ubmad_put_device_priv(struct ubmad_device_priv *dev_priv); + +/* agent */ +struct ubmad_agent_priv *ubmad_get_agent_priv(struct ubcore_device *device); +void ubmad_put_agent_priv(struct ubmad_agent_priv *agent_priv); + +/** datapath **/ +/* reliable communication */ +void ubmad_init_msn_mgr(struct ubmad_msn_mgr *msn_mgr); +void ubmad_uninit_msn_mgr(struct ubmad_msn_mgr *msn_mgr); + +void ubmad_init_seid_hlist(struct ubmad_jetty_resource *rsrc); +void 
ubmad_uninit_seid_hlist(struct ubmad_jetty_resource *rsrc); + +/* post */ +int ubmad_repost_send(struct ubmad_msg *msg, struct ubmad_tjetty *tjetty, + struct ubcore_target_seg *send_seg, + struct workqueue_struct *rt_wq, + struct ubmad_jetty_resource *rsrc); +void ubmad_post_send_close_req(struct ubmad_jetty_resource *rsrc, + struct ubcore_tjetty *tjetty); + +int ubmad_post_recv(struct ubmad_jetty_resource *rsrc); + +/* poll */ +void ubmad_jfce_handler_s(struct ubcore_jfc *jfc); +void ubmad_jfce_handler_r(struct ubcore_jfc *jfc); + +#endif /* UB_MAD_PRIV_H */ diff --git a/drivers/ub/urma/ubcore/ubcm/ubcm_genl.c b/drivers/ub/urma/ubcore/ubcm/ubcm_genl.c new file mode 100644 index 000000000000..0fad31f42646 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ubcm_genl.c @@ -0,0 +1,891 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * Description: ub_cm generic netlink implementation + * Author: Chen Yutao + * Create: 2025-01-10 + * Note: + * History: 2025-01-10: create file + */ + +#include +#include +#include +#include + + + +#include "ub_mad.h" +#include "ub_cm.h" +#include "ubcm_log.h" +#include "ubcm_genl.h" + +struct ubcm_uvs_list { + spinlock_t lock; /* for both uvs list and eid_hlist of uvs_node */ + struct list_head list; /* uvs genl nodes list */ + int count; /* number of uvs genl nodes in list */ + uint32_t next_id; /* next id for uvs */ +}; + +static struct ubcm_uvs_list g_ubcm_uvs_list = { 0 }; +static inline struct ubcm_uvs_list *get_uvs_list(void) +{ + return &g_ubcm_uvs_list; +} +atomic_t g_ubcm_nlmsg_seq; + +static int ubcm_genl_uvs_add_handler(struct sk_buff *skb, + struct genl_info *info); +static int ubcm_genl_uvs_remove_handler(struct sk_buff *skb, + struct genl_info *info); +static int ubcm_genl_uvs_add_eid_handler(struct sk_buff *skb, + struct genl_info *info); +static int ubcm_genl_uvs_del_eid_handler(struct sk_buff *skb, + struct genl_info *info); +static int ubcm_genl_uvs_msg_handler(struct sk_buff *skb, + struct genl_info *info); +static int ubcm_genl_uvs_authn_handler(struct sk_buff *skb, + struct genl_info *info); + +static int ubcm_nl_notifier_call(struct notifier_block *nb, + unsigned long action, void *data); + +static const struct nla_policy g_ubcm_policy[NUM_UBCM_ATTR] = { + [UBCM_ATTR_UNSPEC] = { 0 }, + [UBCM_HDR_COMMAND] = { .type = NLA_U32 }, + [UBCM_HDR_ARGS_LEN] = { .type = NLA_U32 }, + [UBCM_HDR_ARGS_ADDR] = { .type = NLA_U64 }, + [UBCM_ATTR_NS_MODE] = { .type = NLA_U8 }, + [UBCM_ATTR_DEV_NAME] = { .type = NLA_STRING, + .len = UBCORE_MAX_DEV_NAME - 1 }, + [UBCM_ATTR_NS_FD] = { .type = NLA_U32 }, + [UBCM_MSG_SEQ] = { .type = NLA_U32 }, + [UBCM_MSG_TYPE] = { .type = NLA_U32 }, + [UBCM_SRC_ID] = { .len = UBCORE_EID_SIZE }, + [UBCM_DST_ID] = { .len = UBCORE_EID_SIZE }, + [UBCM_RESERVED] = { .type = NLA_U32 }, + [UBCM_PAYLOAD_DATA] = { .type = NLA_BINARY } +}; + +static const struct genl_ops g_ubcm_genl_ops[] = { + { .cmd = UBCM_CMD_UVS_ADD, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .doit = ubcm_genl_uvs_add_handler }, + { .cmd = UBCM_CMD_UVS_REMOVE, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .doit = ubcm_genl_uvs_remove_handler }, + { .cmd = UBCM_CMD_UVS_ADD_EID, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .doit = ubcm_genl_uvs_add_eid_handler }, + { .cmd = UBCM_CMD_UVS_DEL_EID, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .doit = ubcm_genl_uvs_del_eid_handler }, + { .cmd 
= UBCM_CMD_UVS_MSG, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .doit = ubcm_genl_uvs_msg_handler }, + { .cmd = UBCM_CMD_UVS_AUTHN, + .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, + + .doit = ubcm_genl_uvs_authn_handler } +}; + +struct genl_family g_ubcm_genl_family __ro_after_init = { + .hdrsize = 0, + .name = UBCM_GENL_FAMILY_NAME, + .version = UBCM_GENL_FAMILY_VERSION, + .maxattr = UBCM_ATTR_MAX, + .policy = g_ubcm_policy, + + .resv_start_op = UBCM_CMD_NUM, + + .netnsok = true, + .module = THIS_MODULE, + .ops = g_ubcm_genl_ops, + .n_ops = ARRAY_SIZE(g_ubcm_genl_ops) +}; + +static struct notifier_block g_ubcm_nl_notifier = { + .notifier_call = ubcm_nl_notifier_call, +}; + +static int ubcm_check_uvs_para(struct genl_info *info, uint32_t *length) +{ + uint32_t payload_len; + + if (!info->attrs[UBCM_PAYLOAD_DATA]) { + ubcm_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + payload_len = (uint32_t)nla_len(info->attrs[UBCM_PAYLOAD_DATA]); + if (payload_len == 0 || payload_len > UBCM_MAX_UVS_NAME_LEN) { + ubcm_log_err("Invalid payload length: %u.\n", payload_len); + return -EINVAL; + } + + *length = payload_len; + + return 0; +} + +static int ubcm_copy_uvs_name(struct genl_info *info, char *uvs_name, + uint32_t payload_len) +{ + (void)memcpy(uvs_name, nla_data(info->attrs[UBCM_PAYLOAD_DATA]), + payload_len); + uvs_name[UBCM_MAX_UVS_NAME_LEN - 1] = '\0'; + + return 0; +} + +static struct ubcm_uvs_genl_node * +ubcm_lookup_genl_node_lockless(const char *uvs_name, + struct ubcm_uvs_list *uvs_list) +{ + struct ubcm_uvs_genl_node *node, *next; + + list_for_each_entry_safe(node, next, &uvs_list->list, list_node) { + if (strcmp(node->name, uvs_name) == 0) + return node; + } + return NULL; +} + +static int ubcm_genl_uvs_add(const char *uvs_name, uint32_t genl_port, + struct sock *genl_sock) +{ + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_genl_node *new = NULL; + struct ubcm_uvs_genl_node *node; + int idx; + + new = kzalloc(sizeof(struct ubcm_uvs_genl_node), GFP_ATOMIC); + if (new == NULL) + return -ENOMEM; + + spin_lock(&uvs_list->lock); + node = ubcm_lookup_genl_node_lockless(uvs_name, uvs_list); + if (node != NULL) { + spin_unlock(&uvs_list->lock); + ubcm_log_warn("Uvs: %s already exist.\n", uvs_name); + kfree(new); + return -EEXIST; + } + + (void)strscpy(new->name, uvs_name, UBCM_MAX_UVS_NAME_LEN); + kref_init(&new->ref); + new->pid = (uint32_t)task_tgid_vnr(current); + new->id = uvs_list->next_id; + new->state = UBCM_UVS_STATE_ALIVE; + atomic_set(&new->map2ue, 0); + new->genl_port = genl_port; + new->genl_sock = genl_sock; + for (idx = 0; idx < UBCM_EID_TABLE_SIZE; idx++) + INIT_HLIST_HEAD(&new->eid_hlist[idx]); + + list_add_tail(&new->list_node, &uvs_list->list); + uvs_list->count++; + uvs_list->next_id++; + spin_unlock(&uvs_list->lock); + + ubcm_log_info("Finish to add uvs node: %s, id: %u.\n", uvs_name, + new->id); + return 0; +} + +static int ubcm_genl_uvs_add_handler(struct sk_buff *skb, + struct genl_info *info) +{ + char uvs_name[UBCM_MAX_UVS_NAME_LEN] = { 0 }; + uint32_t payload_len; + int ret; + + ret = ubcm_check_uvs_para(info, &payload_len); + if (ret != 0) { + ubcm_log_err("Invalid add parameter.\n"); + return ret; + } + + ret = ubcm_copy_uvs_name(info, uvs_name, payload_len); + if (ret != 0) + return ret; + + ret = ubcm_genl_uvs_add(uvs_name, info->snd_portid, + genl_info_net(info)->genl_sock); + if (ret != 0) { + ubcm_log_err("Failed to add uvs genl node: %s.\n", uvs_name); + return ret; + } + + return 0; +} 
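+
+/*
+ * UVS node reference counting: ubcm_genl_uvs_add() initializes the kref,
+ * ubcm_uvs_kref_get() takes an extra reference (for example from
+ * ubcm_find_get_uvs_by_eid()), and ubcm_uvs_kref_put() releases one.
+ * The release callback frees the cached eid nodes and the uvs node itself.
+ */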
+ +void ubcm_uvs_kref_get(struct ubcm_uvs_genl_node *node) +{ + kref_get(&node->ref); +} + +static void ubcm_uvs_kref_release(struct kref *ref) +{ + struct ubcm_uvs_genl_node *node = + container_of(ref, struct ubcm_uvs_genl_node, ref); + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_eid_node *eid_node; + struct hlist_node *next; + int i; + + spin_lock(&uvs_list->lock); + for (i = 0; i < UBCM_EID_TABLE_SIZE; i++) { + hlist_for_each_entry_safe(eid_node, next, &node->eid_hlist[i], + node) { + hlist_del(&eid_node->node); + kfree(eid_node); + } + } + spin_unlock(&uvs_list->lock); + + ubcm_log_info("Release uvs: %s, uvs_id: %u.\n", node->name, node->id); + kfree(node); +} + +void ubcm_uvs_kref_put(struct ubcm_uvs_genl_node *node) +{ + uint32_t refcnt; + + refcnt = kref_read(&node->ref); + ubcm_log_info("kref_put: uvs %s, id %u, old refcnt %u, new refcnt %u\n", + node->name, node->id, refcnt, + refcnt > 0 ? refcnt - 1 : 0); + + (void)kref_put(&node->ref, ubcm_uvs_kref_release); +} + +static int ubcm_genl_uvs_remove(const char *uvs_name) +{ + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_genl_node *node; + + spin_lock(&uvs_list->lock); + node = ubcm_lookup_genl_node_lockless(uvs_name, uvs_list); + if (node == NULL) { + spin_unlock(&uvs_list->lock); + ubcm_log_err("Failed to lookup uvs node: %s.\n", uvs_name); + return -ENOENT; + } + + if (node->state == UBCM_UVS_STATE_DEAD) { + spin_unlock(&uvs_list->lock); + ubcm_log_warn("Uvs: %s already set dead.\n", uvs_name); + return -EPERM; + } + + if (atomic_read(&node->map2ue) != 0) { + node->state = UBCM_UVS_STATE_DEAD; + spin_unlock(&uvs_list->lock); + ubcm_log_info( + "Uvs %s was referenced by ue, set dead and keep it.\n", + uvs_name); + return 0; + } + + list_del(&node->list_node); + node->state = UBCM_UVS_STATE_DEAD; + uvs_list->count--; + spin_unlock(&uvs_list->lock); + ubcm_uvs_kref_put(node); + + ubcm_log_info("Uvs: %s removed.\n", uvs_name); + return 0; +} + +static int ubcm_genl_uvs_remove_handler(struct sk_buff *skb, + struct genl_info *info) +{ + char uvs_name[UBCM_MAX_UVS_NAME_LEN]; + uint32_t payload_len; + int ret; + + ret = ubcm_check_uvs_para(info, &payload_len); + if (ret != 0) { + ubcm_log_err("Invalid remove parameter.\n"); + return ret; + } + + ret = ubcm_copy_uvs_name(info, uvs_name, payload_len); + if (ret != 0) + return ret; + + ret = ubcm_genl_uvs_remove(uvs_name); + if (ret != 0) { + ubcm_log_err("Failed to remove uvs genl node: %s.\n", uvs_name); + return ret; + } + + return 0; +} + +static int ubcm_parse_uvs_eid_para(struct genl_info *info, + struct ubcm_nlmsg_op_eid *para, + enum ubcm_genl_msg_type type) +{ + uint32_t payload_len; + uint32_t msg_type; + + if (!info->attrs[UBCM_PAYLOAD_DATA]) { + ubcm_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + payload_len = (uint32_t)nla_len(info->attrs[UBCM_PAYLOAD_DATA]); + if (payload_len != sizeof(struct ubcm_nlmsg_op_eid)) { + ubcm_log_err("Invalid payload length: %u.\n", payload_len); + return -EINVAL; + } + + msg_type = nla_get_u32(info->attrs[UBCM_MSG_TYPE]); + if (msg_type != (uint32_t)type) { + ubcm_log_err("Invalid msg_type: %u, type: %u.\n", msg_type, + (uint32_t)type); + return -EINVAL; + } + + (void)memcpy(para, nla_data(info->attrs[UBCM_PAYLOAD_DATA]), + payload_len); + para->uvs_name[UBCM_MAX_UVS_NAME_LEN - 1] = '\0'; + return 0; +} + +static struct ubcm_uvs_eid_node * +ubcm_find_eid_node_lockless(struct ubcm_uvs_genl_node *uvs, uint32_t hash, + union ubcore_eid *eid) +{ + /* No need to check hash as it is no larger than 
UBCM_EID_TABLE_SIZE */ + struct ubcm_uvs_eid_node *eid_node; + struct hlist_node *next; + + hlist_for_each_entry_safe(eid_node, next, &uvs->eid_hlist[hash], node) { + if (memcmp(&eid_node->eid, eid, sizeof(union ubcore_eid)) == 0) + return eid_node; + } + + ubcm_log_info("Failed to lookup eid node: " EID_FMT ", hash: %u.\n", + EID_ARGS(*eid), hash); + return NULL; +} + +static int ubcm_add_uvs_eid(struct ubcm_nlmsg_op_eid *para) +{ + uint32_t hash = jhash(¶->eid, sizeof(union ubcore_eid), 0) % + UBCM_EID_TABLE_SIZE; + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_eid_node *node, *new; + struct ubcm_uvs_genl_node *uvs; + + /* Step 1: Lookup eid node to judge whether to create new node */ + spin_lock(&uvs_list->lock); + uvs = ubcm_lookup_genl_node_lockless(para->uvs_name, uvs_list); + if (uvs == NULL) { + spin_unlock(&uvs_list->lock); + ubcm_log_err("Failed to find uvs: %s.\n", para->uvs_name); + return -EINVAL; + } + + if (uvs->eid_cnt >= UBCM_EID_TABLE_SIZE) { + spin_unlock(&uvs_list->lock); + ubcm_log_err("Invalid operation, eid_cnt: %u.\n", uvs->eid_cnt); + return -EINVAL; + } + + node = ubcm_find_eid_node_lockless(uvs, hash, ¶->eid); + if (node != NULL) { + spin_unlock(&uvs_list->lock); + ubcm_log_warn("Eid: " EID_FMT " already added in uvs: %s.\n", + EID_ARGS(para->eid), para->uvs_name); + return -1; + } + spin_unlock(&uvs_list->lock); + + /* Step 2: Create new eid node */ + new = kzalloc(sizeof(struct ubcm_uvs_genl_node), GFP_KERNEL); + if (new == NULL) + return -ENOMEM; + new->eid_idx = para->eid_idx; + new->eid = para->eid; + INIT_HLIST_NODE(&new->node); + + /* Step 3: Lookup eid node to judge whether to add the new node into hlist */ + spin_lock(&uvs_list->lock); + node = ubcm_find_eid_node_lockless(uvs, hash, ¶->eid); + if (node != NULL) { + spin_unlock(&uvs_list->lock); + ubcm_log_warn("Eid: " EID_FMT " added in uvs: %s.\n", + EID_ARGS(para->eid), para->uvs_name); + kfree(new); + return -1; + } + hlist_add_head(&new->node, &uvs->eid_hlist[hash]); + uvs->eid_cnt++; + spin_unlock(&uvs_list->lock); + ubcm_log_info("Finish to add uvs eid: " EID_FMT ", uvs_name: %s.\n", + EID_ARGS(para->eid), para->uvs_name); + + return 0; +} + +static int ubcm_genl_uvs_add_eid_handler(struct sk_buff *skb, + struct genl_info *info) +{ + struct ubcm_nlmsg_op_eid para = { 0 }; + int ret; + + ret = ubcm_parse_uvs_eid_para(info, ¶, UBCM_CMD_UVS_ADD_EID); + if (ret != 0) + return ret; + + return ubcm_add_uvs_eid(¶); +} + +static int ubcm_find_del_eid_node_lockless(struct ubcm_uvs_genl_node *uvs, + union ubcore_eid *eid) +{ + uint32_t hash = + jhash(eid, sizeof(union ubcore_eid), 0) % UBCM_EID_TABLE_SIZE; + struct ubcm_uvs_eid_node *eid_node; + struct hlist_node *next; + + hlist_for_each_entry_safe(eid_node, next, &uvs->eid_hlist[hash], node) { + if (memcmp(&eid_node->eid, eid, sizeof(union ubcore_eid)) == + 0) { + hlist_del(&eid_node->node); + kfree(eid_node); + uvs->eid_cnt--; + return 0; + } + } + + ubcm_log_err("Failed to lookup eid node: " EID_FMT ", hash: %u.\n", + EID_ARGS(*eid), hash); + return -1; +} + +static int ubcm_del_uvs_eid(struct ubcm_nlmsg_op_eid *para) +{ + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_genl_node *uvs; + int ret; + + spin_lock(&uvs_list->lock); + uvs = ubcm_lookup_genl_node_lockless(para->uvs_name, uvs_list); + if (uvs == NULL) { + spin_unlock(&uvs_list->lock); + ubcm_log_err("Failed find uvs: %s.\n", para->uvs_name); + return -EINVAL; + } + if (uvs->eid_cnt == 0) { + spin_unlock(&uvs_list->lock); + ubcm_log_err("Invalid operation, 
there is no valid eid.\n"); + return -EINVAL; + } + + ret = ubcm_find_del_eid_node_lockless(uvs, ¶->eid); + spin_unlock(&uvs_list->lock); + + if (ret != 0) { + ubcm_log_err("Failed to delete uvs eid: " EID_FMT + ", uvs_name: %s.\n", + EID_ARGS(para->eid), para->uvs_name); + } else { + ubcm_log_info("Finish to delete uvs eid: " EID_FMT + ", uvs_name: %s.\n", + EID_ARGS(para->eid), para->uvs_name); + } + return ret; +} + +static int ubcm_genl_uvs_del_eid_handler(struct sk_buff *skb, + struct genl_info *info) +{ + struct ubcm_nlmsg_op_eid para = { 0 }; + int ret; + + ret = ubcm_parse_uvs_eid_para(info, ¶, UBCM_CMD_UVS_DEL_EID); + if (ret != 0) + return ret; + + return ubcm_del_uvs_eid(¶); +} + +static struct ubmad_send_buf *ubcm_get_nlmsg_send_buf(struct genl_info *info) +{ + struct ubmad_send_buf *send_buf; + uint32_t payload_len; + + if (!info->attrs[UBCM_PAYLOAD_DATA]) { + ubcm_log_err("Invalid parameter.\n"); + return NULL; + } + + payload_len = (uint32_t)nla_len(info->attrs[UBCM_PAYLOAD_DATA]); + if (payload_len > UBCM_MAX_NL_MSG_BUF_LEN) { + ubcm_log_err("Invalid payload_len: %u.\n", payload_len); + return NULL; + } + + send_buf = + kzalloc((size_t)(sizeof(struct ubmad_send_buf) + payload_len), + GFP_KERNEL); + if (send_buf == NULL) + return NULL; + + send_buf->payload_len = payload_len; + send_buf->msg_type = UBMAD_CONN_DATA; // using wk_jetty0 + + if (info->attrs[UBCM_SRC_ID]) + (void)memcpy(&send_buf->src_eid, + nla_data(info->attrs[UBCM_SRC_ID]), + UBCORE_EID_SIZE); + + if (info->attrs[UBCM_DST_ID]) + (void)memcpy(&send_buf->dst_eid, + nla_data(info->attrs[UBCM_DST_ID]), + UBCORE_EID_SIZE); + + if (info->attrs[UBCM_PAYLOAD_DATA]) + (void)memcpy(send_buf->payload, + nla_data(info->attrs[UBCM_PAYLOAD_DATA]), + payload_len); + + return send_buf; +} + +static int ubcm_genl_uvs_msg_handler(struct sk_buff *skb, + struct genl_info *info) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + struct ubmad_send_buf *send_buf; + struct ubcm_work *cm_work; + bool ret; + + send_buf = ubcm_get_nlmsg_send_buf(info); + if (send_buf == NULL) { + ubcm_log_err("Failed to get nlmsg send buffer.\n"); + return -1; + } + + cm_work = kzalloc(sizeof(struct ubcm_work), GFP_ATOMIC); + if (cm_work == NULL) { + kfree(send_buf); + return -ENOMEM; + } + cm_work->send_buf = send_buf; + + INIT_WORK(&cm_work->work, ubcm_work_handler); + /* return value: 1-work is executing in work-queue; 0-work is not executing */ + ret = queue_work(cm_ctx->wq, &cm_work->work); + if (!ret) { + kfree(cm_work); + kfree(send_buf); + ubcm_log_err("Cm work already in workqueue, ret: %u.\n", ret); + return -1; + } + + return 0; +} + +static struct ubmad_send_buf *ubcm_get_nlmsg_authn_buf(struct genl_info *info) +{ + struct ubmad_send_buf *send_buf; + + send_buf = kzalloc((size_t)(sizeof(struct ubmad_send_buf)), GFP_KERNEL); + if (send_buf == NULL) + return NULL; + send_buf->payload_len = 0; + send_buf->msg_type = UBMAD_AUTHN_DATA; // using wk_jetty1 + + if (info->attrs[UBCM_SRC_ID]) + (void)memcpy(&send_buf->src_eid, + nla_data(info->attrs[UBCM_SRC_ID]), + UBCORE_EID_SIZE); + + if (info->attrs[UBCM_DST_ID]) + (void)memcpy(&send_buf->dst_eid, + nla_data(info->attrs[UBCM_DST_ID]), + UBCORE_EID_SIZE); + + return send_buf; +} + +static int ubcm_genl_uvs_authn_handler(struct sk_buff *skb, + struct genl_info *info) +{ + struct ubcm_context *cm_ctx = get_ubcm_ctx(); + struct ubmad_send_buf *send_buf; + struct ubcm_work *cm_work; + bool ret; + + send_buf = ubcm_get_nlmsg_authn_buf(info); + if (send_buf == NULL) { + ubcm_log_err("Failed to get 
nlmsg authentication buffer.\n"); + return -1; + } + + cm_work = kzalloc(sizeof(struct ubcm_work), GFP_ATOMIC); + if (cm_work == NULL) { + kfree(send_buf); + return -ENOMEM; + } + cm_work->send_buf = send_buf; + + INIT_WORK(&cm_work->work, ubcm_work_handler); + /* return value: 1-work is executing in work-queue; 0-work is not executing */ + ret = queue_work(cm_ctx->wq, &cm_work->work); + if (!ret) { + kfree(cm_work); + kfree(send_buf); + ubcm_log_err("Cm work already in workqueue, ret: %u.\n", ret); + return -1; + } + + return 0; +} + +static struct ubcm_uvs_genl_node * +ubcm_lookup_node_by_portid_lockless(struct ubcm_uvs_list *uvs_list, + uint32_t portid) +{ + struct ubcm_uvs_genl_node *result = NULL; + struct ubcm_uvs_genl_node *node, *next; + + list_for_each_entry_safe(node, next, &uvs_list->list, list_node) { + if (node->genl_port == portid) { + result = node; + break; + } + } + + return result; +} + +static void ubcm_unset_genl_pid(uint32_t portid) +{ + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_genl_node *node; + + spin_lock(&uvs_list->lock); + node = ubcm_lookup_node_by_portid_lockless(uvs_list, portid); + if (node == NULL) { + spin_unlock(&uvs_list->lock); + return; + } + + list_del(&node->list_node); + spin_unlock(&uvs_list->lock); + + ubcm_log_err("Finish to unset port: %u for uvs: %s, id: %u.\n", portid, + node->name, node->id); + node->genl_port = UBCM_GENL_INVALID_PORT; + node->genl_sock = NULL; + /* free node buffer */ + ubcm_uvs_kref_put(node); +} + +static int ubcm_nl_notifier_call(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct netlink_notify *notify = data; + + if (action != NETLINK_URELEASE || notify == NULL || + notify->protocol != NETLINK_GENERIC) + return NOTIFY_DONE; + + ubcm_unset_genl_pid(notify->portid); + return NOTIFY_DONE; +} + +static void ubcm_uvs_list_init(void) +{ + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + + spin_lock_init(&uvs_list->lock); + INIT_LIST_HEAD(&uvs_list->list); + uvs_list->count = 0; + uvs_list->next_id = 1; /* 0 for invalid uvs id */ +} + +static void ubcm_uvs_list_uninit(void) +{ + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_genl_node *node, *next; + + spin_lock(&uvs_list->lock); + list_for_each_entry_safe(node, next, &uvs_list->list, list_node) { + list_del(&node->list_node); + kfree(node); + } + uvs_list->count = 0; + uvs_list->next_id = 0; + spin_unlock(&uvs_list->lock); +} + +int ubcm_genl_init(void) +{ + int ret; + + ubcm_uvs_list_init(); + ret = genl_register_family(&g_ubcm_genl_family); + if (ret != 0) + ubcm_log_err( + "Failed to init ubcm generic netlink family, ret: %d.\n", + ret); + + ret = netlink_register_notifier(&g_ubcm_nl_notifier); + if (ret != 0) + ubcm_log_err("Failed to register notifier, ret: %d.\n", ret); + + ubcm_log_info("Finish to init ubcm generic netlink.\n"); + return ret; +} + +void ubcm_genl_uninit(void) +{ + (void)netlink_unregister_notifier(&g_ubcm_nl_notifier); + (void)genl_unregister_family(&g_ubcm_genl_family); + ubcm_uvs_list_uninit(); +} + +struct ubcm_nlmsg *ubcm_alloc_genl_msg(struct ubmad_recv_cr *recv_cr) +{ + uint32_t payload_len = recv_cr->payload_len; + struct ubcm_nlmsg *nlmsg; + + nlmsg = kzalloc(sizeof(struct ubcm_nlmsg) + payload_len, GFP_KERNEL); + if (nlmsg == NULL) + return NULL; + + nlmsg->src_eid = recv_cr->cr->remote_id.eid; + nlmsg->dst_eid = recv_cr->local_eid; + nlmsg->msg_type = UBCM_CMD_UVS_MSG; + nlmsg->payload_len = payload_len; + (void)memcpy(nlmsg->payload, (const void *)recv_cr->payload, + 
payload_len); + nlmsg->nlmsg_seq = ubcm_get_nlmsg_seq(); + + return nlmsg; +} + +struct ubcm_nlmsg *ubcm_alloc_genl_authn_msg(struct ubmad_recv_cr *recv_cr) +{ + uint32_t payload_len = recv_cr->payload_len; + struct ubcm_nlmsg *nlmsg; + + if (payload_len != 0) { + ubcm_log_err("Invalid payload length: %u.\n", payload_len); + return ERR_PTR(-EINVAL); + } + nlmsg = kzalloc(sizeof(struct ubcm_nlmsg), GFP_KERNEL); + if (nlmsg == NULL) + return NULL; + + nlmsg->src_eid = recv_cr->cr->remote_id.eid; + nlmsg->dst_eid = recv_cr->local_eid; + nlmsg->msg_type = UBCM_CMD_UVS_AUTHN; + nlmsg->payload_len = payload_len; + nlmsg->nlmsg_seq = ubcm_get_nlmsg_seq(); + + return nlmsg; +} + +struct ubcm_uvs_genl_node *ubcm_find_get_uvs_by_eid(union ubcore_eid *eid) +{ + uint32_t hash = + jhash(eid, sizeof(union ubcore_eid), 0) % UBCM_EID_TABLE_SIZE; + struct ubcm_uvs_list *uvs_list = get_uvs_list(); + struct ubcm_uvs_genl_node *uvs, *next_uvs; + struct ubcm_uvs_eid_node *node; + struct hlist_node *next_node; + + spin_lock(&uvs_list->lock); + list_for_each_entry_safe(uvs, next_uvs, &uvs_list->list, list_node) { + if (IS_ERR_OR_NULL(uvs) || uvs->eid_cnt == 0) + continue; + hlist_for_each_entry_safe(node, next_node, + &uvs->eid_hlist[hash], node) { + if (memcmp(&node->eid, eid, sizeof(union ubcore_eid)) == + 0) { + ubcm_uvs_kref_get(uvs); + spin_unlock(&uvs_list->lock); + ubcm_log_info("Find uvs: %s by eid: " EID_FMT + ".\n", + uvs->name, EID_ARGS(*eid)); + return uvs; + } + } + } + spin_unlock(&uvs_list->lock); + ubcm_log_err("Failed to find uvs by eid: " EID_FMT ".\n", + EID_ARGS(*eid)); + + return NULL; +} + +int ubcm_genl_unicast(struct ubcm_nlmsg *msg, uint32_t len, + struct ubcm_uvs_genl_node *uvs) +{ + struct sk_buff *nl_skb; + struct nlmsghdr *nlh; + int ret; + + if (msg == NULL || uvs->genl_sock == NULL || + uvs->genl_port == UBCM_GENL_INVALID_PORT) { + ubcm_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + /* create sk_buff */ + nl_skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (nl_skb == NULL) { + ubcm_log_err("Failed to creatge nl_skb.\n"); + return -1; + } + + /* set genl head */ + nlh = genlmsg_put(nl_skb, uvs->genl_port, msg->nlmsg_seq, + &g_ubcm_genl_family, NLM_F_ACK, + (uint8_t)msg->msg_type); + if (nlh == NULL) { + ubcm_log_err("Failed to nlmsg put.\n"); + nlmsg_free(nl_skb); + return -1; + } + if (nla_put_u32(nl_skb, UBCM_MSG_SEQ, msg->nlmsg_seq) || + nla_put_u32(nl_skb, UBCM_MSG_TYPE, msg->msg_type) || + nla_put(nl_skb, UBCM_SRC_ID, (int)sizeof(union ubcore_eid), + &msg->src_eid) || + nla_put(nl_skb, UBCM_DST_ID, (int)sizeof(union ubcore_eid), + &msg->dst_eid) || + nla_put(nl_skb, UBCM_PAYLOAD_DATA, (int)msg->payload_len, + msg->payload)) { + ubcm_log_err("Failed in nla_put operations.\n"); + nlmsg_free(nl_skb); + return -1; + } + + genlmsg_end(nl_skb, nlh); + ubcm_log_info("Finish to send genl msg, seq: %u, payload_len: %u.\n", + msg->nlmsg_seq, msg->payload_len); + + ret = nlmsg_unicast(uvs->genl_sock, nl_skb, uvs->genl_port); + if (ret != 0) { + ubcm_log_err("Failed to send genl msg, ret: %d.\n", ret); + nlmsg_free(nl_skb); + return ret; + } + + return 0; +} diff --git a/drivers/ub/urma/ubcore/ubcm/ubcm_genl.h b/drivers/ub/urma/ubcore/ubcm/ubcm_genl.h new file mode 100644 index 000000000000..3e69e6bc941a --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ubcm_genl.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * Description: ubcm generic netlink header + * Author: Chen Yutao + * Create: 2025-01-10 + * Note: + * History: 2025-01-10: Create file + */ + +#ifndef UBCM_GENL_H +#define UBCM_GENL_H + +#include + + + +#include "ub_mad.h" + +/* NETLINK_GENERIC related info */ +#define UBCM_GENL_FAMILY_NAME "UBCM_GENL" +#define UBCM_GENL_FAMILY_VERSION 1 +#define UBCM_GENL_INVALID_PORT 0 +#define UBCM_MAX_NL_MSG_BUF_LEN 1024 +#define UBCM_EID_TABLE_SIZE 256 /* Refer to TPSA_EID_IDX_TABLE_SIZE */ + +#define UBCM_MAX_UVS_NAME_LEN 64 + +enum ubcm_uvs_state { UBCM_UVS_STATE_DEAD = 0, UBCM_UVS_STATE_ALIVE }; + +enum ubcm_genl_attr { /* Refer to enum uvs_cm_genl_attr */ + UBCM_ATTR_UNSPEC, + UBCM_HDR_COMMAND, + UBCM_HDR_ARGS_LEN, + UBCM_HDR_ARGS_ADDR, + UBCM_ATTR_NS_MODE, + UBCM_ATTR_DEV_NAME, + UBCM_ATTR_NS_FD, + UBCM_MSG_SEQ, + UBCM_MSG_TYPE, + UBCM_SRC_ID, + UBCM_DST_ID, + UBCM_RESERVED, + UBCM_PAYLOAD_DATA, + UBCM_ATTR_AFTER_LAST, + NUM_UBCM_ATTR = UBCM_ATTR_AFTER_LAST, + UBCM_ATTR_MAX = UBCM_ATTR_AFTER_LAST - 1 +}; + +/* Handling generic netlnik messages from UVS, only forward messages */ +enum ubcm_genl_msg_type { + UBCM_CMD_UVS_ADD = 0, + UBCM_CMD_UVS_REMOVE, + UBCM_CMD_UVS_ADD_EID, + UBCM_CMD_UVS_DEL_EID, + UBCM_CMD_UVS_MSG, + UBCM_CMD_UVS_AUTHN, /* Authentication */ + UBCM_CMD_NUM +}; + +struct ubcm_nlmsg { /* Refer to uvs_nl_cm_msg_t */ + uint32_t nlmsg_seq; + uint32_t msg_type; /* Refer to ubcm_genl_msg_type */ + union ubcore_eid src_eid; + union ubcore_eid dst_eid; + uint32_t payload_len; + uint32_t reserved; + uint8_t payload[]; +}; + +struct ubcm_uvs_eid_node { + struct hlist_node node; + uint32_t eid_idx; + uint32_t reserved; + union ubcore_eid eid; +}; + +struct ubcm_uvs_genl_node { + struct list_head list_node; + struct kref ref; + char name[UBCM_MAX_UVS_NAME_LEN]; /* name to identify UVS */ + enum ubcm_uvs_state state; + uint32_t id; + uint32_t policy; + uint32_t genl_port; /* uvs genl port */ + struct sock *genl_sock; + uint32_t pid; + atomic_t map2ue; + atomic_t nl_wait_buffer; + struct hlist_head eid_hlist + [UBCM_EID_TABLE_SIZE]; /* Storing struct ubcm_uvs_eid_node */ + uint32_t eid_cnt; +}; + +/* Payload structure for UBCM_CMD_UVS_ADD_EID or UBCM_CMD_UVS_DEL_EID */ +struct ubcm_nlmsg_op_eid { + uint32_t eid_idx; + uint32_t reserved; + union ubcore_eid eid; + char uvs_name[UBCM_MAX_UVS_NAME_LEN]; +}; + +extern atomic_t g_ubcm_nlmsg_seq; +static inline uint32_t ubcm_get_nlmsg_seq(void) +{ + return atomic_inc_return(&g_ubcm_nlmsg_seq); +} + +int ubcm_genl_init(void); +void ubcm_genl_uninit(void); + +struct ubcm_nlmsg *ubcm_alloc_genl_msg(struct ubmad_recv_cr *recv_cr); +struct ubcm_nlmsg *ubcm_alloc_genl_authn_msg(struct ubmad_recv_cr *recv_cr); + +void ubcm_uvs_kref_get(struct ubcm_uvs_genl_node *node); +void ubcm_uvs_kref_put(struct ubcm_uvs_genl_node *node); + +struct ubcm_uvs_genl_node *ubcm_find_get_uvs_by_eid(union ubcore_eid *eid); + +static inline uint32_t ubcm_nlmsg_len(struct ubcm_nlmsg *msg) +{ + return sizeof(struct ubcm_nlmsg) + msg->payload_len; +} + +/* Ubcm send nlmsg to UVS by netlink */ +int ubcm_genl_unicast(struct ubcm_nlmsg *msg, uint32_t len, + struct ubcm_uvs_genl_node *uvs); + +#endif /* UBCM_GENL_H */ diff --git a/drivers/ub/urma/ubcore/ubcm/ubcm_log.c b/drivers/ub/urma/ubcore/ubcm/ubcm_log.c new file mode 100644 index 000000000000..dd0154308276 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ubcm_log.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * Description: ubcm log file + * Author: Qian Guoxin + * Create: 2025-01-10 + * Note: + * History: 2024-01-10: Create file + */ + +#include +#include "ubcm_log.h" + +uint32_t g_ubcm_log_level = UBCM_LOG_LEVEL_INFO; diff --git a/drivers/ub/urma/ubcore/ubcm/ubcm_log.h b/drivers/ub/urma/ubcore/ubcm/ubcm_log.h new file mode 100644 index 000000000000..3b3829c297b7 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ubcm_log.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * Description: ubcm log head file + * Author: Qian Guoxin + * Create: 2025-01-10 + * Note: + * History: 2025-01-10: Create file + */ + +#ifndef UBCM_LOG_H +#define UBCM_LOG_H + +#include +#include + +enum ubcm_log_level { + UBCM_LOG_LEVEL_EMERG = 0, + UBCM_LOG_LEVEL_ALERT = 1, + UBCM_LOG_LEVEL_CRIT = 2, + UBCM_LOG_LEVEL_ERR = 3, + UBCM_LOG_LEVEL_WARNING = 4, + UBCM_LOG_LEVEL_NOTICE = 5, + UBCM_LOG_LEVEL_INFO = 6, + UBCM_LOG_LEVEL_DEBUG = 7, + UBCM_LOG_LEVEL_MAX = 8 +}; + +/* add log head info, "LogTag_UBCM|function|[line]| */ +#define UBCM_LOG_TAG "LogTag_UBCM" +#define ubcm_log(l, format, args...) \ + pr_##l("%s|%s:[%d]|" format, UBCM_LOG_TAG, __func__, __LINE__, ##args) + +#define UBCM_RATELIMIT_INTERVAL (5 * HZ) +#define UBCM_RATELIMIT_BURST 100 + +extern uint32_t g_ubcm_log_level; + +#define ubcm_log_info(...) \ + do { \ + if (g_ubcm_log_level >= UBCM_LOG_LEVEL_INFO) \ + ubcm_log(info, __VA_ARGS__); \ + } while (0) + +#define ubcm_log_err(...) \ + do { \ + if (g_ubcm_log_level >= UBCM_LOG_LEVEL_ERR) \ + ubcm_log(err, __VA_ARGS__); \ + } while (0) + +#define ubcm_log_warn(...) \ + do { \ + if (g_ubcm_log_level >= UBCM_LOG_LEVEL_WARNING) \ + ubcm_log(warn, __VA_ARGS__); \ + } while (0) + +/* No need to record debug log by printk_ratelimited */ +#define ubcm_log_debug(...) \ + do { \ + if (g_ubcm_log_level >= UBCM_LOG_LEVEL_DEBUG) \ + ubcm_log(debug, __VA_ARGS__); \ + } while (0) + +/* Rate Limited log to avoid soft lockup crash by quantities of printk */ +/* Current limit is 100 log every 5 seconds */ +#define ubcm_log_info_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBCM_RATELIMIT_INTERVAL, \ + UBCM_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubcm_log_level >= UBCM_LOG_LEVEL_INFO)) \ + ubcm_log(info, __VA_ARGS__); \ + } while (0) + +#define ubcm_log_err_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBCM_RATELIMIT_INTERVAL, \ + UBCM_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubcm_log_level >= UBCM_LOG_LEVEL_ERR)) \ + ubcm_log(err, __VA_ARGS__); \ + } while (0) + +#define ubcm_log_warn_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBCM_RATELIMIT_INTERVAL, \ + UBCM_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubcm_log_level >= UBCM_LOG_LEVEL_WARNING)) \ + ubcm_log(warn, __VA_ARGS__); \ + } while (0) + +#endif /* UBCM_LOG_H */ diff --git a/drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c b/drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c new file mode 100644 index 000000000000..3038ad13af49 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c @@ -0,0 +1,1201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * Description: ub_mad datapath implementation + * Author: Chen Yutao + * Create: 2025-02-21 + * Note: + * History: 2025-02-21: create file + */ + +#include +#include + + +#include "ubcore_topo_info.h" +#include "net/ubcore_cm.h" +#include "ubcm_log.h" + +#include "ub_mad_priv.h" + +/** reliable communication **/ +/* msn mgr */ +void ubmad_init_msn_mgr(struct ubmad_msn_mgr *msn_mgr) +{ + uint32_t idx; + + for (idx = 0; idx < UBMAD_MSN_HLIST_SIZE; idx++) + INIT_HLIST_HEAD(&msn_mgr->msn_hlist[idx]); + spin_lock_init(&msn_mgr->msn_hlist_lock); + atomic64_set(&msn_mgr->msn_gen, 0); // msn starts from 0 +} + +void ubmad_uninit_msn_mgr(struct ubmad_msn_mgr *msn_mgr) +{ + struct ubmad_msn_node *msn_node; + struct hlist_node *next; + unsigned long flag; + uint32_t idx; + + spin_lock_irqsave(&msn_mgr->msn_hlist_lock, flag); + for (idx = 0; idx < UBMAD_MSN_HLIST_SIZE; idx++) { + hlist_for_each_entry_safe(msn_node, next, + &msn_mgr->msn_hlist[idx], node) { + hlist_del(&msn_node->node); + kfree(msn_node); + } + } + spin_unlock_irqrestore(&msn_mgr->msn_hlist_lock, flag); +} + +/* msn node */ +static struct ubmad_msn_node * +ubmad_create_msn_node(uint64_t msn, struct ubmad_msn_mgr *msn_mgr) +{ + struct ubmad_msn_node *msn_node; + unsigned long flag; + uint32_t hash; + + msn_node = kzalloc(sizeof(struct ubmad_msn_node), GFP_KERNEL); + if (IS_ERR_OR_NULL(msn_node)) + return ERR_PTR(-ENOMEM); + + msn_node->msn = msn; + INIT_HLIST_NODE(&msn_node->node); + + hash = jhash(&msn, sizeof(uint64_t), 0) % UBMAD_MSN_HLIST_SIZE; + spin_lock_irqsave(&msn_mgr->msn_hlist_lock, flag); + hlist_add_head(&msn_node->node, &msn_mgr->msn_hlist[hash]); + spin_unlock_irqrestore(&msn_mgr->msn_hlist_lock, flag); + + return msn_node; +} + +static void ubmad_destroy_msn_node(struct ubmad_msn_node *msn_node, + struct ubmad_msn_mgr *msn_mgr) +{ + unsigned long flag; + + spin_lock_irqsave(&msn_mgr->msn_hlist_lock, flag); + hlist_del(&msn_node->node); + kfree(msn_node); + spin_unlock_irqrestore(&msn_mgr->msn_hlist_lock, flag); +} + +/* retransmission work */ +static void ubmad_rt_work_handler(struct work_struct *work) +{ + struct delayed_work *delay_work = + container_of(work, struct delayed_work, work); + struct ubmad_rt_work *rt_work = + container_of(delay_work, struct ubmad_rt_work, delay_work); + + struct ubmad_msn_mgr *msn_mgr = rt_work->msn_mgr; + unsigned long flag; + struct ubmad_msn_node *cur; + struct hlist_node *next; + uint32_t hash = jhash(&rt_work->msn, sizeof(uint64_t), 0) % + UBMAD_MSN_HLIST_SIZE; + bool found = false; + + struct ubmad_msg *msg = rt_work->msg; + uint64_t sge_addr = (uint64_t)msg; + uint32_t sge_idx; + struct ubmad_jetty_resource *rsrc = rt_work->rsrc; + + // try to find msn_node + spin_lock_irqsave(&msn_mgr->msn_hlist_lock, flag); + hlist_for_each_entry_safe(cur, next, &msn_mgr->msn_hlist[hash], node) { + if (cur->msn == rt_work->msn) { + found = true; + break; + } + } + spin_unlock_irqrestore(&msn_mgr->msn_hlist_lock, flag); + + // found indicates not ack. Need to repost + if (found && rt_work->rt_cnt <= UBMAD_MAX_RETRY_CNT) { + rt_work->rt_cnt++; + if (ubmad_repost_send(msg, rt_work->tjetty, rsrc->send_seg, + rt_work->rt_wq, rsrc) == 0) + return; + ubcm_log_err("repost send failed. 
msg type %d msn %llu\n", + msg->msg_type, msg->msn); + } + ubcm_log_info("Do not repost, found: %u, rt_work->rt_cnt: %u.\n", + (uint32_t)found, rt_work->rt_cnt); + + /* not found OR repost failed + * put data msg sge id + */ + if (sge_addr < rsrc->send_seg->seg.ubva.va) { + ubcm_log_err("sge addr should not < seg addr\n"); + } else { + sge_idx = (sge_addr - rsrc->send_seg->seg.ubva.va) / + UBMAD_SGE_MAX_LEN; + ubmad_bitmap_put_id(rsrc->send_seg_bitmap, + sge_idx); // get in ubmad_do_post_send() + } + kfree(rt_work); +} + +struct ubmad_rt_work *ubmad_create_rt_work(struct workqueue_struct *rt_wq, + struct ubmad_msn_mgr *msn_mgr, + struct ubmad_msg *msg, + struct ubmad_tjetty *tjetty, + struct ubmad_jetty_resource *rsrc) +{ + struct ubmad_rt_work *rt_work; + + rt_work = kzalloc(sizeof(struct ubmad_rt_work), + GFP_KERNEL); // free in ubmad_rt_work_handler() + if (IS_ERR_OR_NULL(rt_work)) + return ERR_PTR(-ENOMEM); + rt_work->msn = msg->msn; + rt_work->msn_mgr = msn_mgr; + rt_work->msg = msg; + rt_work->tjetty = tjetty; + rt_work->rsrc = rsrc; + rt_work->rt_wq = rt_wq; + rt_work->rt_cnt = 0; + + INIT_DELAYED_WORK(&rt_work->delay_work, ubmad_rt_work_handler); + if (queue_delayed_work(rt_wq, &rt_work->delay_work, + UBMAD_RETRANSMIT_PERIOD) != true) { + ubcm_log_err("queue rt work failed\n"); + kfree(rt_work); + return NULL; + } + + return rt_work; +} + +/* seid_node */ +static struct ubmad_seid_node * +ubmad_get_seid_node(union ubcore_eid *seid, struct ubmad_jetty_resource *rsrc) +{ + unsigned long flag; + struct ubmad_seid_node *cur; + struct hlist_node *next; + uint32_t hash = + jhash(seid, sizeof(union ubcore_eid), 0) % UBMAD_MAX_SEID_NUM; + + spin_lock_irqsave(&rsrc->seid_hlist_lock, flag); + hlist_for_each_entry_safe(cur, next, &rsrc->seid_hlist[hash], node) { + if (memcmp(&cur->seid, seid, sizeof(union ubcore_eid)) == 0) { + kref_get(&cur->kref); + spin_unlock_irqrestore(&rsrc->seid_hlist_lock, flag); + return cur; + } + } + spin_unlock_irqrestore(&rsrc->seid_hlist_lock, flag); + + return NULL; +} + +static void ubmad_release_seid_node(struct kref *kref) +{ + struct ubmad_seid_node *seid_node = + container_of(kref, struct ubmad_seid_node, kref); + struct ubmad_bitmap *rx_bitmap = seid_node->rx_bitmap; + + if (rx_bitmap) + ubmad_destroy_bitmap(rx_bitmap); + kfree(seid_node); +} + +static void ubmad_put_seid_node(struct ubmad_seid_node *seid_node) +{ + kref_put(&seid_node->kref, ubmad_release_seid_node); +} + +/* need to put twice to release seid_node. + * First put for kref_get() is called by user after using created seid_node. + * Second put for kref_init() is in ubmad_uninit_seid_hlist(). 
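+ * ubmad_delete_seid_node() drops that same initial reference when a single
+ * source eid is removed before the whole hlist is torn down.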
+ */ +static struct ubmad_seid_node * +ubmad_create_seid_node(union ubcore_eid *seid, + struct ubmad_jetty_resource *rsrc) +{ + struct ubmad_seid_node *seid_node; + uint32_t hash; + unsigned long flag; + + seid_node = kzalloc(sizeof(struct ubmad_seid_node), GFP_KERNEL); + if (IS_ERR_OR_NULL(seid_node)) + return ERR_PTR(-ENOMEM); + + seid_node->rx_bitmap = ubmad_create_bitmap(UBMAD_RX_BITMAP_SIZE); + if (IS_ERR_OR_NULL(seid_node->rx_bitmap)) { + kfree(seid_node); + return ERR_PTR(-ENOMEM); + } + + seid_node->seid = *seid; + INIT_HLIST_NODE(&seid_node->node); + kref_init(&seid_node->kref); + atomic64_set(&seid_node->expected_msn, 0); + + hash = jhash(seid, sizeof(union ubcore_eid), 0) % UBMAD_MAX_SEID_NUM; + spin_lock_irqsave(&rsrc->seid_hlist_lock, flag); + hlist_add_head(&seid_node->node, &rsrc->seid_hlist[hash]); + kref_get(&seid_node->kref); // put by user outside this func + spin_unlock_irqrestore(&rsrc->seid_hlist_lock, flag); + + return seid_node; +} + +static void ubmad_delete_seid_node(union ubcore_eid *seid, + struct ubmad_jetty_resource *rsrc) +{ + uint32_t hash = + jhash(seid, sizeof(union ubcore_eid), 0) % UBMAD_MAX_SEID_NUM; + struct ubmad_seid_node *cur; + struct hlist_node *next; + unsigned long flag; + + spin_lock_irqsave(&rsrc->seid_hlist_lock, flag); + hlist_for_each_entry_safe(cur, next, &rsrc->seid_hlist[hash], node) { + if (memcmp(&cur->seid, seid, sizeof(union ubcore_eid)) == 0) { + hlist_del(&cur->node); + ubmad_put_seid_node(cur); + } + } + spin_unlock_irqrestore(&rsrc->seid_hlist_lock, flag); +} + +void ubmad_init_seid_hlist(struct ubmad_jetty_resource *rsrc) +{ + uint32_t idx; + + for (idx = 0; idx < UBMAD_MSN_HLIST_SIZE; idx++) + INIT_HLIST_HEAD(&rsrc->seid_hlist[idx]); + spin_lock_init(&rsrc->seid_hlist_lock); +} + +void ubmad_uninit_seid_hlist(struct ubmad_jetty_resource *rsrc) +{ + struct ubmad_seid_node *seid_node; + struct hlist_node *next; + unsigned long flag; + uint32_t idx; + + spin_lock_irqsave(&rsrc->seid_hlist_lock, flag); + for (idx = 0; idx < UBMAD_MAX_SEID_NUM; idx++) { + hlist_for_each_entry_safe(seid_node, next, + &rsrc->seid_hlist[idx], node) { + hlist_del(&seid_node->node); + ubmad_put_seid_node(seid_node); + } + } + spin_unlock_irqrestore(&rsrc->seid_hlist_lock, flag); +} + +/** post **/ +// prepare msg to send +static int ubmad_prepare_msg(uint64_t sge_addr, struct ubmad_send_buf *send_buf, + uint32_t msn, struct ubcore_jetty *jetty, + struct ubmad_tjetty *tjetty) +{ + struct ubmad_msg *msg = (struct ubmad_msg *)sge_addr; + + if (sizeof(struct ubmad_msg) + send_buf->payload_len > + UBMAD_SGE_MAX_LEN) { + ubcm_log_err( + "msg header %lu + payload_len %u exceeds sge max length %u\n", + sizeof(struct ubmad_msg), send_buf->payload_len, + UBMAD_SGE_MAX_LEN); + return -EINVAL; + } + if (send_buf->msg_type == UBMAD_AUTHN_DATA && + send_buf->payload_len != 0) { + ubcm_log_err("Invalid authentication payload_len %u\n", + send_buf->payload_len); + return -EINVAL; + } + + msg->version = UBMAD_MSG_VERSION_0; + msg->msn = msn; + msg->msg_type = send_buf->msg_type; + msg->payload_len = send_buf->payload_len; + if (send_buf->msg_type == UBMAD_CONN_DATA || + send_buf->msg_type == UBMAD_UBC_CONN_DATA) + // send_buf will be freed by cm. mad needs to memcpy user data to send sge. 
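+		// Only connection data messages copy a user payload into the sge;
+		// the other message types are sent with just the header filled in above.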
+ (void)memcpy((void *)msg->payload, send_buf->payload, + send_buf->payload_len); + + return 0; +} + +static int ubmad_do_post_send_conn_data(struct ubcore_jetty *jetty, + struct ubmad_tjetty *tjetty, + struct ubcore_jfs_wr *jfs_wr, + struct workqueue_struct *rt_wq, + struct ubmad_jetty_resource *rsrc) +{ + uint64_t sge_addr = jfs_wr->send.src.sge->addr; + struct ubmad_msg *msg = (struct ubmad_msg *)sge_addr; + uint64_t msn = msg->msn; + union ubcore_eid *dst_eid = &tjetty->tjetty->cfg.id.eid; + + struct ubmad_msn_node *msn_node; + struct ubcore_jfs_wr *jfs_bad_wr = NULL; + + int ret; + + /* create msn_node before post to avoid recv ack before msn_node created and wrongly trigger + * fast retransmission. + */ + msn_node = ubmad_create_msn_node(msn, &tjetty->msn_mgr); + if (IS_ERR_OR_NULL(msn_node)) { + ubcm_log_err("create msn_node failed. msn %llu eid " EID_FMT + "\n", + msn, EID_ARGS(*dst_eid)); + return -1; + } + + if (atomic_fetch_add(1, &rsrc->tx_in_queue) >= UBMAD_TX_THREDSHOLD) { + atomic_fetch_sub(1, &rsrc->tx_in_queue); + ubcm_log_err("Invalid threshold, tx_in_queue: %u.\n", + (uint32_t)atomic_read(&rsrc->tx_in_queue)); + ret = -1; + goto destroy_msn_node; + } + ret = ubcore_post_jetty_send_wr(jetty, jfs_wr, &jfs_bad_wr); + if (ret != 0) { + ubcm_log_err("ubcore post send failed. msn %llu eid " EID_FMT + "\n", + msn, EID_ARGS(*dst_eid)); + atomic_fetch_sub(1, &rsrc->tx_in_queue); + goto destroy_msn_node; + } + + ubcm_log_info("send conn data successfully. msn %llu eid " EID_FMT "\n", + msn, EID_ARGS(*dst_eid)); + return 0; + +destroy_msn_node: + ubmad_destroy_msn_node(msn_node, &tjetty->msn_mgr); + return ret; +} + +static int ubmad_do_post_send(struct ubmad_jetty_resource *rsrc, + struct ubmad_tjetty *tjetty, + struct ubmad_send_buf *send_buf, uint64_t msn, + struct workqueue_struct *rt_wq) +{ + uint32_t sge_idx; + uint64_t sge_addr; + struct ubcore_jetty *jetty = rsrc->jetty; + struct ubmad_msg *msg; + + struct ubcore_sge sge = { 0 }; + struct ubcore_jfs_wr jfs_wr = { 0 }; + + int ret; + + /* prepare */ + /* get sge + * data msg sge id put in ubmad_rt_work_handler(). + * ack sge id put in ubmad_send_work_handler() + */ + sge_idx = ubmad_bitmap_get_id(rsrc->send_seg_bitmap); + if (sge_idx >= rsrc->send_seg_bitmap->size) { + ubcm_log_err("get sge_idx failed\n"); + return -1; + } + sge_addr = rsrc->send_seg->seg.ubva.va + UBMAD_SGE_MAX_LEN * sge_idx; + + // prepare msg, msg stored in sge + ret = ubmad_prepare_msg(sge_addr, send_buf, msn, jetty, tjetty); + if (ret != 0) { + ubcm_log_err("prepare msg failed. 
ret %d payload_len %u\n", ret, + send_buf->payload_len); + goto put_id; + } + + // prepare wr + jfs_wr.opcode = UBCORE_OPC_SEND; + jfs_wr.tjetty = tjetty->tjetty; + sge.addr = sge_addr; + sge.len = send_buf->payload_len + + sizeof(struct ubmad_msg); // only need to send data len + sge.tseg = rsrc->send_seg; + jfs_wr.send.src.sge = &sge; + jfs_wr.send.src.num_sge = 1; + jfs_wr.user_ctx = sge_addr; + jfs_wr.flag.bs.complete_enable = 1; + + /* post */ + msg = (struct ubmad_msg *)sge_addr; + switch (msg->msg_type) { + case UBMAD_CONN_DATA: + case UBMAD_UBC_CONN_DATA: + ret = ubmad_do_post_send_conn_data(jetty, tjetty, &jfs_wr, + rt_wq, rsrc); + break; + case UBMAD_CONN_ACK: + case UBMAD_UBC_CONN_ACK: + case UBMAD_AUTHN_DATA: + case UBMAD_AUTHN_ACK: + ubcm_log_warn_rl("No need to send ack, msg->msg_type: %d", + (int)msg->msg_type); + ret = -1; + break; + default: + ubcm_log_err("invalid msg_type %d\n", msg->msg_type); + ret = -EINVAL; + } + if (ret != 0) { + ubcm_log_err("post send failed. msg type %d ret %d\n", + msg->msg_type, ret); + goto put_id; + } + + return 0; + +put_id: + (void)ubmad_bitmap_put_id(rsrc->send_seg_bitmap, sge_idx); + return ret; +} + +// for UBMAD_CONN_DATA, UBMAD_AUTHN_DATA +int ubmad_post_send(struct ubcore_device *device, + struct ubmad_send_buf *send_buf, + struct ubmad_send_buf **bad_send_buf) +{ + struct ubmad_device_priv *dev_priv = NULL; + struct ubmad_jetty_resource *rsrc; + struct ubcore_jetty *wk_jetty; // well-known jetty + struct ubmad_tjetty *wk_tjetty; + union ubcore_eid dst_primary_eid = { 0 }; + int ret; + + dev_priv = ubmad_get_device_priv(device); // put below + if (IS_ERR_OR_NULL(dev_priv)) { + ubcm_log_err("Failed to get dev_priv, dev_name: %s.\n", + device->dev_name); + return -1; + } + if (!dev_priv->valid) { + ubcm_log_err("dev_priv rsrc not inited. dev_name: %s.\n", + device->dev_name); + ret = -1; + goto put_device_priv; + } + + switch (send_buf->msg_type) { + case UBMAD_CONN_DATA: + case UBMAD_UBC_CONN_DATA: + rsrc = &dev_priv->jetty_rsrc[0]; + break; + case UBMAD_AUTHN_DATA: + rsrc = &dev_priv->jetty_rsrc[1]; + break; + default: + ubcm_log_err("Invalid msg_type: %d.\n", + (int)send_buf->msg_type); + ret = -EINVAL; + goto put_device_priv; + } + wk_jetty = rsrc->jetty; + + /* import well-known jetty */ + // unimport in ubmad_uninit_jetty_rsrc() + ret = ubcore_get_primary_eid(&send_buf->dst_eid, &dst_primary_eid); + if (ret != 0) { + ubcm_log_err("get primary eid failed\n"); + goto put_device_priv; + } + wk_tjetty = ubmad_import_jetty(device, rsrc, &dst_primary_eid); + if (IS_ERR_OR_NULL(wk_tjetty)) { + ubcm_log_err("import jetty failed. eid " EID_FMT "\n", + EID_ARGS(dst_primary_eid)); + ret = -1; + goto put_device_priv; + } + + /* post send */ + ret = ubmad_do_post_send( + rsrc, wk_tjetty, send_buf, + atomic64_fetch_inc(&wk_tjetty->msn_mgr.msn_gen), + dev_priv->rt_wq); + + ubmad_put_tjetty(wk_tjetty); // first put for ubmad_import_jetty() above +put_device_priv: + ubmad_put_device_priv(dev_priv); // get above + return ret; +} + +// post send UBMAD_CONN_ACK when recv conn data +int ubmad_post_send_conn_ack(struct ubmad_jetty_resource *rsrc, + struct ubmad_tjetty *tjetty, uint64_t msn) +{ + struct ubmad_send_buf send_buf = { 0 }; + + send_buf.src_eid = rsrc->jetty->jetty_id.eid; + send_buf.dst_eid = tjetty->tjetty->cfg.id.eid; + send_buf.msg_type = UBMAD_CONN_ACK; + + if (ubmad_do_post_send(rsrc, tjetty, &send_buf, msn, NULL) != 0) { + ubcm_log_err("post send conn ack failed. 
dst_eid " EID_FMT + ", msn %llu\n", + EID_ARGS(send_buf.dst_eid), msn); + return -1; + } + + return 0; +} + +/* repost send for retransmission of UBMAD_CONN_DATA / UBMAD_UBC_CONN_DATA */ +int ubmad_repost_send_conn_data(struct ubcore_jetty *jetty, + struct ubmad_tjetty *tjetty, + struct ubcore_jfs_wr *jfs_wr, + struct workqueue_struct *rt_wq, + struct ubmad_jetty_resource *rsrc) +{ + uint64_t sge_addr = jfs_wr->send.src.sge->addr; + struct ubmad_msg *msg = (struct ubmad_msg *)sge_addr; + uint64_t msn = msg->msn; + union ubcore_eid *dst_eid = &tjetty->tjetty->cfg.id.eid; + + struct ubcore_jfs_wr *jfs_bad_wr = NULL; + struct ubmad_rt_work *rt_work; + + int ret = -1; + + if (atomic_fetch_add(1, &rsrc->tx_in_queue) >= UBMAD_TX_THREDSHOLD) { + atomic_fetch_sub(1, &rsrc->tx_in_queue); + ubcm_log_err("Invalid threshold.\n"); + return -1; + } + ret = ubcore_post_jetty_send_wr(jetty, jfs_wr, &jfs_bad_wr); + if (ret != 0) { + ubcm_log_err("ubcore post send failed. msn %llu eid " EID_FMT + "\n", + msn, EID_ARGS(*dst_eid)); + atomic_fetch_sub(1, &rsrc->tx_in_queue); + return ret; + } + + // create rt_work after post to avoid rt_work handled before first post. + rt_work = ubmad_create_rt_work(rt_wq, &tjetty->msn_mgr, msg, tjetty, + rsrc); + if (IS_ERR_OR_NULL(rt_work)) { + ubcm_log_err("create rt_work failed. msn %llu eid " EID_FMT + "\n", + msn, EID_ARGS(*dst_eid)); + return -1; + } + + ubcm_log_info("send conn data successfully. msn %llu eid " EID_FMT "\n", + msn, EID_ARGS(*dst_eid)); + return 0; +} + +int ubmad_repost_send(struct ubmad_msg *msg, struct ubmad_tjetty *tjetty, + struct ubcore_target_seg *send_seg, + struct workqueue_struct *rt_wq, + struct ubmad_jetty_resource *rsrc) +{ + union ubcore_eid *dst_eid = &tjetty->tjetty->cfg.id.eid; + uint64_t sge_addr = (uint64_t)msg; + struct ubcore_sge sge = { 0 }; + struct ubcore_jfs_wr jfs_wr = { 0 }; + int ret; + + ubcm_log_info("timeout and repost. msn %llu eid " EID_FMT "\n", + msg->msn, EID_ARGS(*dst_eid)); + + // prepare wr + jfs_wr.opcode = UBCORE_OPC_SEND; + jfs_wr.tjetty = tjetty->tjetty; + sge.addr = sge_addr; + sge.len = msg->payload_len + sizeof(struct ubmad_msg); + sge.tseg = send_seg; + jfs_wr.send.src.sge = &sge; + jfs_wr.send.src.num_sge = 1; + jfs_wr.user_ctx = sge_addr; + jfs_wr.flag.bs.complete_enable = 1; + (void)jfs_wr; + + /* post */ + switch (msg->msg_type) { + case UBMAD_CONN_DATA: + case UBMAD_UBC_CONN_DATA: + ubcm_log_err("Invalid msg_type: %d", (int)msg->msg_type); + ret = -1; + break; + default: + ubcm_log_err("invalid msg_type %d. msn %llu eid " EID_FMT "\n", + msg->msg_type, msg->msn, EID_ARGS(*dst_eid)); + return -EINVAL; + } + + if (ret != 0) { + ubcm_log_err( + "repost send failed. 
msg type %d msn %llu eid " EID_FMT + "\n", + msg->msg_type, msg->msn, EID_ARGS(*dst_eid)); + return ret; + } + + return 0; +} + +void ubmad_post_send_close_req(struct ubmad_jetty_resource *rsrc, + struct ubcore_tjetty *tjetty) +{ + struct ubcore_jfs_wr *jfs_bad_wr = NULL; + struct ubcore_jfs_wr jfs_wr = { 0 }; + struct ubcore_sge sge = { 0 }; + struct ubmad_msg *msg; + uint64_t sge_addr; + uint32_t sge_idx; + int ret; + + if (atomic_fetch_add(1, &rsrc->tx_in_queue) >= UBMAD_TX_THREDSHOLD) { + atomic_fetch_sub(1, &rsrc->tx_in_queue); + ubcm_log_err("Invalid threshold, tx_in_queue: %u.\n", + (uint32_t)atomic_read(&rsrc->tx_in_queue)); + return; + } + + sge_idx = ubmad_bitmap_get_id(rsrc->send_seg_bitmap); + if (sge_idx >= rsrc->send_seg_bitmap->size) { + atomic_fetch_sub(1, &rsrc->tx_in_queue); + ubcm_log_err("Failed to get sge_idx: %u.\n", sge_idx); + return; + } + + sge_addr = rsrc->send_seg->seg.ubva.va + UBMAD_SGE_MAX_LEN * sge_idx; + msg = (struct ubmad_msg *)sge_addr; + msg->version = UBMAD_MSG_VERSION_0; + msg->msg_type = UBMAD_CLOSE_REQ; + msg->payload_len = 0; + msg->msn = 0; // UBMAD_CLOSE_REQ is unreliable, msn does not work + + sge.addr = sge_addr; + sge.len = (uint32_t)sizeof(struct ubmad_msg); + sge.tseg = rsrc->send_seg; + jfs_wr.opcode = UBCORE_OPC_SEND; + jfs_wr.tjetty = tjetty; + jfs_wr.send.src.sge = &sge; + jfs_wr.send.src.num_sge = 1; + + ret = ubcore_post_jetty_send_wr(rsrc->jetty, &jfs_wr, &jfs_bad_wr); + if (ret != 0) { + atomic_fetch_sub(1, &rsrc->tx_in_queue); + ubcm_log_warn( + "Failed to send close request, ret: %d, jetty_id: %u.\n", + ret, rsrc->jetty->jetty_id.id); + } +} + +/* + * 1. fill up jfr in ubmad_open_device() for first post_send of each jetty0 pair. + * 2. supplement one consumed wqe to jfr after poll jfc_r in ubmad_jfce_handler_r(). + * 3. this function is private and in ubmad range. + */ +int ubmad_post_recv(struct ubmad_jetty_resource *rsrc) +{ + uint32_t sge_idx; + uint64_t sge_addr; + struct ubcore_sge sge = { 0 }; + struct ubcore_jfr_wr jfr_wr = { 0 }; + struct ubcore_jfr_wr *jfr_bad_wr = NULL; + int ret; + + sge_idx = ubmad_bitmap_get_id( + rsrc->recv_seg_bitmap); // put in ubmad_recv_work_handler() + if (sge_idx >= rsrc->recv_seg_bitmap->size) { + ubcm_log_err("get sge_idx failed\n"); + return -1; + } + sge_addr = rsrc->recv_seg->seg.ubva.va + UBMAD_SGE_MAX_LEN * sge_idx; + + sge.addr = sge_addr; + sge.len = UBMAD_SGE_MAX_LEN; + sge.tseg = rsrc->recv_seg; + jfr_wr.src.sge = &sge; + jfr_wr.src.num_sge = 1; + jfr_wr.user_ctx = sge_addr; + ret = ubcore_post_jetty_recv_wr(rsrc->jetty, &jfr_wr, &jfr_bad_wr); + if (ret != 0) { + ubcm_log_err("ubcore post recv failed. 
ret %d\n", ret); + return ret; + } + + return 0; +} + +/** poll **/ +/* process msg after recv */ +static int ubmad_cm_process_msg(struct ubcore_cr *cr, + union ubcore_eid *local_eid, + struct ubmad_msg *msg, + struct ubmad_agent_priv *agent_priv) +{ + struct ubmad_recv_cr recv_cr = { 0 }; + + recv_cr.cr = cr; + recv_cr.local_eid = *local_eid; + recv_cr.msg_type = msg->msg_type; + recv_cr.payload = (uint64_t)msg->payload; + recv_cr.payload_len = msg->payload_len; + + if (agent_priv->agent.recv_handler != NULL && + agent_priv->agent.recv_handler(&agent_priv->agent, &recv_cr) != 0) { + ubcm_log_err("recv_handler exec failed\n"); + return -1; + } + + return 0; +} + +/* Return value: true - msn is valid and message processed; */ +/* false - msn is invalid and message dropped */ +bool ubmad_process_rx_msn(struct ubmad_bitmap *rx_bitmap, uint64_t msn) +{ + bool result; + uint32_t i; + + if (rx_bitmap->right_end >= UBMAD_RX_BITMAP_SIZE && + msn <= rx_bitmap->right_end - UBMAD_RX_BITMAP_SIZE) + return false; + + if (msn <= rx_bitmap->right_end) { + result = ubmad_bitmap_test_id( + rx_bitmap, (uint32_t)(msn % UBMAD_RX_BITMAP_SIZE)); + } else { + for (i = rx_bitmap->right_end + 1; i < msn; i++) + (void)ubmad_bitmap_put_id(rx_bitmap, + i % UBMAD_RX_BITMAP_SIZE); + rx_bitmap->right_end = msn; + ubmad_bitmap_set_id(rx_bitmap, msn); + result = true; + } + + return result; +} + +static int ubmad_process_conn_data(struct ubcore_cr *cr, + struct ubmad_jetty_resource *rsrc, + struct ubmad_device_priv *dev_priv, + struct ubmad_agent_priv *agent_priv) +{ + struct ubmad_msg *msg = (struct ubmad_msg *)cr->user_ctx; + union ubcore_eid *seid = &cr->remote_id.eid; + struct ubmad_seid_node *seid_node; + int ret = 0; + + // get seid_node + seid_node = ubmad_get_seid_node(seid, rsrc); // put below + if (IS_ERR_OR_NULL(seid_node)) { + // destroy in ubmad_uninit_seid_hlist(). No need to destroy even err below. + seid_node = ubmad_create_seid_node(seid, rsrc); + if (IS_ERR_OR_NULL(seid_node)) { + ubcm_log_err( + "create seid_node failed for first msg. msn %llu seid " EID_FMT + "\n", + msg->msn, EID_ARGS(*seid)); + return -1; + } + } + + ubcm_log_info( + "Finish to recv request. msn %llu right_end %llu, seid " EID_FMT + "\n", + msg->msn, seid_node->rx_bitmap->right_end, EID_ARGS(*seid)); + + ret = ubmad_cm_process_msg(cr, &rsrc->jetty->jetty_id.eid, msg, + agent_priv); + if (ret != 0) + ubcm_log_err("cm process msg failed. msn %llu, seid " EID_FMT + "\n", + msg->msn, EID_ARGS(*seid)); + + ubmad_put_seid_node(seid_node); + return ret; +} + +static void ubmad_process_conn_ack(struct ubcore_cr *cr, + struct ubmad_jetty_resource *rsrc, + struct ubmad_device_priv *dev_priv, + struct ubmad_agent_priv *agent_priv) +{ + struct ubmad_msg *msg = (struct ubmad_msg *)cr->user_ctx; + union ubcore_eid *seid = &cr->remote_id.eid; + struct ubmad_tjetty *tjetty; + + struct ubmad_msn_mgr *msn_mgr; + unsigned long flag; + struct ubmad_msn_node *cur; + struct hlist_node *next; + uint32_t hash = + jhash(&msg->msn, sizeof(uint64_t), 0) % UBMAD_MSN_HLIST_SIZE; + + tjetty = ubmad_get_tjetty(seid, rsrc); // put below + if (IS_ERR_OR_NULL(tjetty)) { + ubcm_log_err("get tjetty failed. 
eid " EID_FMT "\n", + EID_ARGS(*seid)); + return; + } + + // try to remove msn_node from msn_hlist + msn_mgr = &tjetty->msn_mgr; + spin_lock_irqsave(&msn_mgr->msn_hlist_lock, flag); + hlist_for_each_entry_safe(cur, next, &msn_mgr->msn_hlist[hash], node) { + if (cur->msn == msg->msn) { + hlist_del(&cur->node); + kfree(cur); + spin_unlock_irqrestore(&msn_mgr->msn_hlist_lock, flag); + goto put_tjetty; + } + } + spin_unlock_irqrestore(&msn_mgr->msn_hlist_lock, flag); + // msn_node not in msn_hlist, indicates already removed by previous ack with same msn + ubcm_log_info("redundant ack. msn %llu seid " EID_FMT "\n", msg->msn, + EID_ARGS(*seid)); + +put_tjetty: + ubmad_put_tjetty(tjetty); + ubcm_log_info("recv conn ack. msn %llu seid " EID_FMT "\n", msg->msn, + EID_ARGS(*seid)); +} + +static int ubmad_process_authn_data(struct ubcore_cr *cr, + struct ubmad_jetty_resource *rsrc, + struct ubmad_agent_priv *agent_priv) +{ + struct ubmad_msg *msg = (struct ubmad_msg *)cr->user_ctx; + union ubcore_eid *seid = &cr->remote_id.eid; + int ret; + + ret = ubmad_cm_process_msg(cr, &rsrc->jetty->jetty_id.eid, msg, + agent_priv); + if (ret != 0) + ubcm_log_err("cm process msg failed. msn %llu, seid " EID_FMT + "\n", + msg->msn, EID_ARGS(*seid)); + + return ret; +} + +static inline void ubmad_process_close_req(struct ubcore_cr *cr, + struct ubmad_jetty_resource *rsrc) +{ + ubmad_remove_tjetty(&cr->remote_id.eid, rsrc); + ubmad_delete_seid_node(&cr->remote_id.eid, rsrc); + + ubcm_log_info("Finish to process close request, remote eid: " EID_FMT + ", remote id: %u.\n", + EID_ARGS(cr->remote_id.eid), cr->remote_id.id); +} + +static int ubmad_process_msg(struct ubcore_cr *cr, + struct ubmad_jetty_resource *rsrc, + struct ubmad_device_priv *dev_priv, + struct ubmad_agent_priv *agent_priv) +{ + struct ubmad_msg *msg = (struct ubmad_msg *)cr->user_ctx; + int ret = 0; + + if (cr->completion_len < sizeof(struct ubmad_msg)) { + ubcm_log_err( + "even header is incomplete. completion_len %u < header size %lu\n", + cr->completion_len, sizeof(struct ubmad_msg)); + return -EINVAL; + } + if (cr->completion_len != sizeof(struct ubmad_msg) + msg->payload_len) { + ubcm_log_err( + "completion_len not right. 
completion_len %u != header %lu + payload len %u\n", + cr->completion_len, sizeof(struct ubmad_msg), + msg->payload_len); + return -EINVAL; + } + + switch (msg->msg_type) { + case UBMAD_CONN_DATA: + case UBMAD_UBC_CONN_DATA: + ret = ubmad_process_conn_data(cr, rsrc, dev_priv, agent_priv); + break; + case UBMAD_CONN_ACK: + case UBMAD_UBC_CONN_ACK: + ubmad_process_conn_ack(cr, rsrc, dev_priv, agent_priv); + break; + case UBMAD_AUTHN_DATA: + ret = ubmad_process_authn_data(cr, rsrc, agent_priv); + break; + case UBMAD_CLOSE_REQ: + ubmad_process_close_req(cr, rsrc); + break; + default: + ubcm_log_err("Invalid msg_type: %u.\n", msg->msg_type); + ret = -EINVAL; + } + + return ret; +} + +/* send_ops for ubcore connection manager */ +// for UBMAD_UBC_CONN_DATA +int ubmad_ubc_send(struct ubcore_device *device, + struct ubcore_cm_send_buf *send_buf) +{ + struct ubmad_send_buf *bad_send_buf = NULL; + struct ubmad_device_priv *dev_priv; + int ret; + + if (device == NULL || send_buf == NULL) { + ubcm_log_err("Invalid parameter.\n"); + return -EINVAL; + } + if (send_buf->msg_type != UBCORE_CM_CONN_MSG) { + ubcm_log_err("Invalid message type: %u.\n", send_buf->msg_type); + return -EINVAL; + } + + dev_priv = ubmad_get_device_priv(device); + if (IS_ERR_OR_NULL(dev_priv)) { + ubcm_log_err("Failed to get dev_priv, dev_name: %s\n", + device->dev_name); + return -1; + } + + send_buf->src_eid = dev_priv->eid_info.eid; + ubmad_put_device_priv(dev_priv); + + ubcm_log_info("ubc dev: %s, s_eid: " EID_FMT ", d_eid: " EID_FMT " ", + device->dev_name, EID_ARGS(send_buf->src_eid), + EID_ARGS(send_buf->dst_eid)); + + ret = ubmad_post_send(device, (struct ubmad_send_buf *)send_buf, + &bad_send_buf); + if (ret != 0) + ubcm_log_err("Failed to send message, ret: %d, length: %u.\n", + ret, send_buf->payload_len); + + return ret; +} + +/* jfce work handler */ +// polling here only indicates if send successfully +static void ubmad_send_work_handler(struct ubmad_device_priv *dev_priv, + struct ubmad_jfce_work *jfce_work) +{ + struct ubmad_jetty_resource *rsrc; + struct ubmad_msg *msg; + uint32_t sge_idx; + int ret; + int cr_cnt; + struct ubcore_cr cr = {0}; + struct ubmad_agent_priv *agent_priv = jfce_work->agent_priv; + struct ubcore_jfc *jfc = jfce_work->jfc; + struct ubmad_send_cr send_cr = {0}; + + cr_cnt = 0; + + rsrc = ubmad_get_jetty_rsrc_by_jfc_s(dev_priv, jfc); + if (IS_ERR_OR_NULL(rsrc)) { + ubcm_log_err("Failed to match jfc for send.\n"); + return; + } + + do { + cr_cnt = ubcore_poll_jfc(jfc, 1, &cr); + if (cr_cnt < 0) { + ubcm_log_err("cr_cnt %d < 0\n", cr_cnt); + break; + } + if (cr_cnt == 0) + break; + + /* cr_cnt == 1 */ + atomic_dec(&rsrc->tx_in_queue); + if (cr.status == UBCORE_CR_SUCCESS) { + send_cr.cr = &cr; + if (agent_priv->agent.send_handler != NULL && + agent_priv->agent.send_handler(&agent_priv->agent, + &send_cr) != 0) + ubcm_log_err("send handler failed. cr_cnt %d\n", + cr_cnt); + } + + // put ack msg sge id + if (cr.user_ctx < rsrc->send_seg->seg.ubva.va) { + ubcm_log_err( + "invalid cr.user_ctx. sge addr should not < seg addr\n"); + } else { + msg = (struct ubmad_msg *)cr.user_ctx; + sge_idx = (cr.user_ctx - rsrc->send_seg->seg.ubva.va) / + UBMAD_SGE_MAX_LEN; + ubmad_bitmap_put_id( + rsrc->send_seg_bitmap, + sge_idx); // get in ubmad_do_post_send() + } + if (cr.status != UBCORE_CR_SUCCESS) { + ubcm_log_err( + "Tx status error. 
cr_cnt %d, status %d, comp_len %u, user_ctx: 0x%llx.\n", + cr_cnt, cr.status, cr.completion_len, + cr.user_ctx); + break; + } + } while (cr_cnt > 0); + + ret = ubcore_rearm_jfc(jfc, false); + ubcm_log_info("Rearm send jfc, jfc_id: %u, ret: %d.\n", jfc->id, ret); +} + +// polling here indicates if recv msg +static void ubmad_recv_work_handler(struct ubmad_device_priv *dev_priv, + struct ubmad_jfce_work *jfce_work) +{ + struct ubcore_jfc *jfc = jfce_work->jfc; + struct ubmad_jetty_resource *rsrc; + struct ubcore_cr cr = {0}; + uint32_t sge_idx; + int ret; + int cr_cnt; + + cr_cnt = 0; + + rsrc = ubmad_get_jetty_rsrc_by_jfc_r(dev_priv, jfc); + if (IS_ERR_OR_NULL(rsrc)) { + ubcm_log_err("Failed to match jfc for recv.\n"); + return; + } + + do { + cr_cnt = ubcore_poll_jfc(jfc, 1, &cr); + if (cr_cnt < 0) { + ubcm_log_err("cr_cnt %d < 0\n", cr_cnt); + break; + } + if (cr_cnt == 0) + break; + + /* cr_cnt == 1 */ + if (cr.status == UBCORE_CR_SUCCESS) { + if (ubmad_process_msg(&cr, rsrc, dev_priv, + jfce_work->agent_priv) != 0) + ubcm_log_err("process msg failed\n"); + } + + // put sge id + if (cr.user_ctx < rsrc->recv_seg->seg.ubva.va) { + ubcm_log_err( + "invalid cr.user_ctx. sge addr should not < seg addr\n"); + } else { + sge_idx = (cr.user_ctx - rsrc->recv_seg->seg.ubva.va) / + UBMAD_SGE_MAX_LEN; + // get in ubmad_post_recv() + ubmad_bitmap_put_id(rsrc->recv_seg_bitmap, sge_idx); + } + + // supplement one consumed wqe + if (ubmad_post_recv(rsrc) != 0) + ubcm_log_err("post recv in jfce handler failed.\n"); + + if (cr.status != UBCORE_CR_SUCCESS) { + ubcm_log_err( + "Rx status error. cr_cnt %d, status %d, comp_len %u, user_ctx: 0x%llx.\n", + cr_cnt, cr.status, cr.completion_len, + cr.user_ctx); + break; + } + } while (cr_cnt > 0); + + ret = ubcore_rearm_jfc(jfc, false); + ubcm_log_info("Rearm recv jfc, jfc_id: %u, ret: %d.\n", jfc->id, ret); +} + +// continue from ubmad_jfce_handler() +static void ubmad_jfce_work_handler(struct work_struct *work) +{ + struct ubmad_jfce_work *jfce_work = + container_of(work, struct ubmad_jfce_work, work); + struct ubcore_device *dev = jfce_work->jfc->ub_dev; + struct ubmad_device_priv *dev_priv = NULL; + + dev_priv = ubmad_get_device_priv(dev); // put below + if (IS_ERR_OR_NULL(dev_priv)) { + ubcm_log_err("fail to get dev_priv, dev_name: %s.\n", + dev->dev_name); + goto put_agent_priv; + } + if (!dev_priv->valid) { + ubcm_log_err_rl("dev_priv rsrc not inited. 
dev_name: %s.\n", + dev->dev_name); + goto put_device_priv; + } + + switch (jfce_work->type) { + case UBMAD_SEND_WORK: + ubmad_send_work_handler(dev_priv, jfce_work); + break; + case UBMAD_RECV_WORK: + ubmad_recv_work_handler(dev_priv, jfce_work); + break; + default: + ubcm_log_err("unknown work type %d\n", jfce_work->type); + } + +put_device_priv: + ubmad_put_device_priv(dev_priv); // get above +put_agent_priv: + ubmad_put_agent_priv( + jfce_work->agent_priv); // get in ubmad_jfce_handler() + kfree(jfce_work); // alloc in ubmad_jfce_handler() +} + +/* jfce handler */ +// see ubmad_jfce_work_handler() then +static void ubmad_jfce_handler(struct ubcore_jfc *jfc, + enum ubmad_jfce_work_type type) +{ + struct ubmad_agent_priv *agent_priv = NULL; + struct ubmad_jfce_work *jfce_work; + int ret; + + agent_priv = ubmad_get_agent_priv( + jfc->ub_dev); // put in ubmad_jfce_work_handler() + if (IS_ERR_OR_NULL(agent_priv)) { + ubcm_log_err("Failed to get agent_priv, dev_name: %s.\n", + jfc->ub_dev->dev_name); + return; + } + ubcm_log_info("Start to handle jfce, type: %d, jfc_id: %u.\n", type, jfc->id); + + // free in ubmad_jfce_work_handler() + jfce_work = kzalloc(sizeof(struct ubmad_jfce_work), GFP_ATOMIC); + if (IS_ERR_OR_NULL(jfce_work)) + goto put_agent_priv; + jfce_work->type = type; + jfce_work->jfc = jfc; + jfce_work->agent_priv = agent_priv; + + INIT_WORK(&jfce_work->work, ubmad_jfce_work_handler); + ret = queue_work(agent_priv->jfce_wq, &jfce_work->work); + if (!ret) { + ubcm_log_err("queue work failed. ret %d\n", ret); + goto free_work; + } + return; + +free_work: + kfree(jfce_work); +put_agent_priv: + ubmad_put_agent_priv(agent_priv); +} + +void ubmad_jfce_handler_s(struct ubcore_jfc *jfc) +{ + ubmad_jfce_handler(jfc, UBMAD_SEND_WORK); +} + +void ubmad_jfce_handler_r(struct ubcore_jfc *jfc) +{ + ubmad_jfce_handler(jfc, UBMAD_RECV_WORK); +} diff --git a/drivers/ub/urma/ubcore/ubcore_main.c b/drivers/ub/urma/ubcore/ubcore_main.c index bb0b7b452ef0..b0cb2ca68823 100644 --- a/drivers/ub/urma/ubcore/ubcore_main.c +++ b/drivers/ub/urma/ubcore/ubcore_main.c @@ -19,6 +19,7 @@ #include "ubcore_connect_adapter.h" #include "ubcore_connect_bonding.h" #include "ubcore_genl.h" +#include "ubcm/ub_cm.h" static int __init ubcore_init(void) { @@ -55,9 +56,17 @@ static int __init ubcore_init(void) goto create_wq; } + ret = ubcm_init(); + if (ret != 0) { + pr_err("Failed to init ubcm, ret: %d.\n", ret); + goto ubcm; + } + ubcore_log_info("ubcore module init success.\n"); return 0; +ubcm: + ubcore_destroy_workqueues(); create_wq: ubcore_unregister_pnet_ops(); reg_pnet: @@ -71,6 +80,7 @@ static int __init ubcore_init(void) static void __exit ubcore_exit(void) { + ubcm_uninit(); ubcore_destroy_workqueues(); ubcore_unregister_pnet_ops(); ubcore_genl_exit(); diff --git a/drivers/ub/urma/ubcore/ubcore_priv.c b/drivers/ub/urma/ubcore/ubcore_priv.c deleted file mode 100644 index 7ab054cfd4e4..000000000000 --- a/drivers/ub/urma/ubcore/ubcore_priv.c +++ /dev/null @@ -1,61 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) Huawei Technologies Co., Ltd. 2021-2025. All rights reserved. 
- * - * Description: ubcore device add and remove ops file - * Author: Qian Guoxin - * Create: 2021-08-03 - * Note: - * History: 2021-08-03: create file - */ - -#include -#include "ubcore_priv.h" - -#define UBCORE_DEVICE_NAME "ubcore" - -static LIST_HEAD(g_mue_cdev_list); -static DECLARE_RWSEM(g_mue_cdev_rwsem); - -static LIST_HEAD(g_client_list); -static LIST_HEAD(g_device_list); - -/* - * g_device_rwsem and g_lists_rwsem protect both g_device_list and g_client_list. - * g_device_rwsem protects writer access by device and client - * g_lists_rwsem protects reader access to these lists. - * Iterators of these lists must lock it for read, while updates - * to the lists must be done with a write lock. - */ -static DECLARE_RWSEM(g_device_rwsem); - -/* - * g_clients_rwsem protect g_client_list. - */ -static DECLARE_RWSEM(g_clients_rwsem); - -int ubcore_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *cfg, - uint32_t *tp_cnt, struct ubcore_tp_info *tp_list, - struct ubcore_udata *udata) -{ - int ret; - - if (dev == NULL || dev->ops == NULL || dev->ops->get_tp_list == NULL || - cfg == NULL || tp_cnt == NULL || tp_list == NULL || *tp_cnt == 0) { - ubcore_log_err("Invalid parameter.\n"); - return -EINVAL; - } - - if (ubcore_check_trans_mode_valid(cfg->trans_mode) != true) { - ubcore_log_err("Invalid parameter, trans_mode: %d.\n", - (int)cfg->trans_mode); - return -EINVAL; - } - - ret = dev->ops->get_tp_list(dev, cfg, tp_cnt, tp_list, udata); - if (ret != 0) - ubcore_log_err("Failed to get to list, ret: %d.\n", ret); - - return ret; -} -EXPORT_SYMBOL(ubcore_get_tp_list); diff --git a/drivers/ub/urma/ubcore/ubcore_segment.c b/drivers/ub/urma/ubcore/ubcore_segment.c index b643fecc7ff3..51576f8a2f49 100644 --- a/drivers/ub/urma/ubcore/ubcore_segment.c +++ b/drivers/ub/urma/ubcore/ubcore_segment.c @@ -11,7 +11,7 @@ #include "ubcore_connect_bonding.h" #include "ubcore_log.h" -#include + #include "ubcore_priv.h" #include "ubcore_hash_table.h" #include "ubcore_tp.h" -- Gitee From d989d83e09db5505c664e28199cbef7dd1cd9543 Mon Sep 17 00:00:00 2001 From: Wen Chen Date: Wed, 12 Nov 2025 17:26:45 +0800 Subject: [PATCH 4/5] ubcore: add ubcore_umem and ubcore_tp implementations urma inclusion category: feature bugzilla: https://gitee.com/openeuler/release-management/issues/ID3WJX ---------------------------------------------- This patch introduces the ubcore_umem and ubcore_tp implementations, which provide essential memory management and transport functionality for the ubcore module. These components form the foundation for efficient data transfer operations within the URMA framework. The ubcore_umem module implements user memory management with the following capabilities: 1. Memory Pinning and Mapping: Handles user-space memory pinning using pin_user_pages_fast with FOLL_LONGTERM flag, ensuring long-term DMA accessibility while maintaining proper memory accounting against RLIMIT_MEMLOCK limits. 2. Scatter-Gather List Management: Efficiently manages memory regions through scatter-gather lists, supporting both contiguous and non-contiguous memory mappings with proper DMA mapping operations. 3. Page Size Optimization: Provides intelligent page size selection through ubcore_umem_find_best_page_size, which analyzes memory alignment and hardware capabilities to determine optimal page sizes for performance. These implementations include comprehensive error handling, resource cleanup, and proper memory lifecycle management to ensure system stability and prevent resource leaks. 
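As an illustrative sketch only (not part of this patch): a device driver
that needs a pinned, DMA-mapped view of a user buffer could drive the new
interface roughly as below. The device pointer, buffer address/length and
the supported-page-size bitmap are placeholder assumptions; only
ubcore_umem_get(), ubcore_umem_find_best_page_size() and
ubcore_umem_release() come from this patch.

	/* Sketch: pin a writable user buffer and pick a page size for HW mapping. */
	static int example_map_user_buf(struct ubcore_device *dev, uint64_t va,
					uint64_t len, uint64_t hw_pgsz_bitmap)
	{
		union ubcore_umem_flag flag = { 0 };
		struct ubcore_umem *umem;
		uint64_t page_size;

		flag.bs.writable = 1;	/* pin with FOLL_WRITE */
		flag.bs.non_pin = 0;	/* non-pin mode is rejected by this patch */

		umem = ubcore_umem_get(dev, va, len, flag);
		if (IS_ERR(umem))
			return PTR_ERR(umem);

		/* hw_pgsz_bitmap: page sizes the (hypothetical) HW supports */
		page_size = ubcore_umem_find_best_page_size(umem, hw_pgsz_bitmap, va);
		if (page_size == 0) {
			ubcore_umem_release(umem);
			return -EINVAL;
		}

		/* ... program HW translation entries from umem->sg_head ... */

		ubcore_umem_release(umem);
		return 0;
	}

Every successful ubcore_umem_get() must be balanced by
ubcore_umem_release(), which unmaps the scatter-gather list, unpins the
pages and drops the pinned_vm accounting.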
Signed-off-by: Wen Chen Signed-off-by: Yongqiang Guo --- drivers/ub/urma/ubcore/Makefile | 6 +- drivers/ub/urma/ubcore/net/ubcore_net.c | 1 - drivers/ub/urma/ubcore/net/ubcore_session.c | 1 - drivers/ub/urma/ubcore/net/ubcore_sock.c | 1 - drivers/ub/urma/ubcore/ubcm/ub_cm.c | 99 +----- drivers/ub/urma/ubcore/ubcm/ub_cm.h | 3 - drivers/ub/urma/ubcore/ubcm/ub_mad.c | 7 +- drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h | 1 - drivers/ub/urma/ubcore/ubcm/ubcm_genl.c | 3 - drivers/ub/urma/ubcore/ubcm/ubcm_genl.h | 3 - drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c | 2 +- drivers/ub/urma/ubcore/ubcore_cdev_file.c | 2 - drivers/ub/urma/ubcore/ubcore_cmd_tlv.h | 1 - .../ub/urma/ubcore/ubcore_connect_adapter.c | 7 +- .../ub/urma/ubcore/ubcore_connect_bonding.c | 2 +- drivers/ub/urma/ubcore/ubcore_device.c | 1 - drivers/ub/urma/ubcore/ubcore_dp.c | 125 +++++++ drivers/ub/urma/ubcore/ubcore_genl.c | 1 - drivers/ub/urma/ubcore/ubcore_genl.h | 1 + drivers/ub/urma/ubcore/ubcore_genl_admin.c | 1 - drivers/ub/urma/ubcore/ubcore_jetty.c | 6 - drivers/ub/urma/ubcore/ubcore_netdev.h | 59 ---- drivers/ub/urma/ubcore/ubcore_netlink.c | 1 - drivers/ub/urma/ubcore/ubcore_priv.h | 3 +- drivers/ub/urma/ubcore/ubcore_segment.c | 1 - drivers/ub/urma/ubcore/ubcore_topo_info.h | 2 +- drivers/ub/urma/ubcore/ubcore_tp.c | 41 +++ drivers/ub/urma/ubcore/ubcore_tp.h | 46 --- drivers/ub/urma/ubcore/ubcore_umem.c | 333 ++++++++++++++++++ drivers/ub/urma/ubcore/ubcore_uvs_cmd.c | 1 - drivers/ub/urma/ubcore/ubcore_uvs_cmd.h | 1 - drivers/ub/urma/ubcore/ubcore_workqueue.h | 1 - include/ub/urma/ubcore_types.h | 4 +- 33 files changed, 521 insertions(+), 246 deletions(-) create mode 100644 drivers/ub/urma/ubcore/ubcore_dp.c delete mode 100644 drivers/ub/urma/ubcore/ubcore_netdev.h create mode 100644 drivers/ub/urma/ubcore/ubcore_tp.c create mode 100644 drivers/ub/urma/ubcore/ubcore_umem.c diff --git a/drivers/ub/urma/ubcore/Makefile b/drivers/ub/urma/ubcore/Makefile index 940e617a753f..06c57e637fee 100644 --- a/drivers/ub/urma/ubcore/Makefile +++ b/drivers/ub/urma/ubcore/Makefile @@ -4,12 +4,13 @@ # ccflags-y += -I$(src) -ubcore-objs := ubcore_tp_table.o \ +ubcore-objs := \ net/ubcore_sock.o \ ubcore_workqueue.o \ ubcore_main.o \ ubcore_hash_table.o \ ubcore_tp.o \ +ubcore_tp_table.o \ ubcore_cgroup.o \ net/ubcore_net.o \ ubcore_cdev_file.o \ @@ -34,6 +35,7 @@ ubcm/ubcm_log.o \ ubcm/ub_mad.o \ ubcm/ubcm_genl.o \ ubcm/ub_cm.o \ -ubcm/ubmad_datapath.o +ubcm/ubmad_datapath.o \ +ubcore_umem.o obj-$(CONFIG_UB_URMA) += ubcore.o diff --git a/drivers/ub/urma/ubcore/net/ubcore_net.c b/drivers/ub/urma/ubcore/net/ubcore_net.c index ff379f5f3149..d52d5bc47dff 100644 --- a/drivers/ub/urma/ubcore/net/ubcore_net.c +++ b/drivers/ub/urma/ubcore/net/ubcore_net.c @@ -11,7 +11,6 @@ #include "ubcore_sock.h" #include "ubcore_cm.h" - #include "ubcore_log.h" #include "ubcore_net.h" diff --git a/drivers/ub/urma/ubcore/net/ubcore_session.c b/drivers/ub/urma/ubcore/net/ubcore_session.c index f58de48fe13b..afffe424f9c3 100644 --- a/drivers/ub/urma/ubcore/net/ubcore_session.c +++ b/drivers/ub/urma/ubcore/net/ubcore_session.c @@ -11,7 +11,6 @@ #include #include - #include "ubcore_log.h" #include "ubcore_session.h" diff --git a/drivers/ub/urma/ubcore/net/ubcore_sock.c b/drivers/ub/urma/ubcore/net/ubcore_sock.c index 15d4e6c597d4..4e9a45b5539e 100644 --- a/drivers/ub/urma/ubcore/net/ubcore_sock.c +++ b/drivers/ub/urma/ubcore/net/ubcore_sock.c @@ -21,7 +21,6 @@ #include #include #include - #include "ubcore_log.h" #include "ubcore_priv.h" #include "ubcore_sock.h" diff 
--git a/drivers/ub/urma/ubcore/ubcm/ub_cm.c b/drivers/ub/urma/ubcore/ubcm/ub_cm.c index 5499adc7a03f..3b9b01b84c0c 100644 --- a/drivers/ub/urma/ubcore/ubcm/ub_cm.c +++ b/drivers/ub/urma/ubcore/ubcm/ub_cm.c @@ -14,20 +14,14 @@ #include #include #include - - - - #include "ubcm_log.h" #include "ubcm_genl.h" #include "ub_mad.h" #include "ub_cm.h" +#include "ub/urma/ubcore_uapi.h" #define UBCM_LOG_FILE_PERMISSION (0644) - #define UBCM_MODULE_NAME "ubcm" -#define UBCM_DEVNO_MODE (0666) -#define UBCM_DEVICE_NAME "ubcm" module_param(g_ubcm_log_level, uint, UBCM_LOG_FILE_PERMISSION); MODULE_PARM_DESC(g_ubcm_log_level, " 3: ERR, 4: WARNING, 6: INFO, 7: DEBUG"); @@ -76,20 +70,6 @@ static struct ubcore_client g_ubcm_client = { .list_node = LIST_HEAD_INIT( .add = ubcm_add_device, .remove = ubcm_remove_device }; -static char *ubcm_devnode(const struct device *dev, umode_t *mode) - -{ - if (mode) - *mode = UBCM_DEVNO_MODE; - - return kasprintf(GFP_KERNEL, "%s", dev_name(dev)); -} - -static struct class g_ubcm_class = { - .name = UBCM_MODULE_NAME, - .devnode = ubcm_devnode, -}; - static int ubcm_get_ubc_dev(struct ubcore_device *device) { if (IS_ERR_OR_NULL(device)) { @@ -379,67 +359,6 @@ struct ubcm_device *ubcm_find_get_device(union ubcore_eid *eid) return target; } -static int ubcm_cdev_create(void) -{ - struct ubcm_context *cm_ctx = get_ubcm_ctx(); - int ret; - - ret = alloc_chrdev_region(&cm_ctx->ubcm_devno, 0, 1, UBCM_MODULE_NAME); - if (ret != 0) { - ubcm_log_err("Failed to alloc chrdev region, ret: %d.\n", ret); - return ret; - } - - /* create /sys/class/ubcm */ - ret = class_register(&g_ubcm_class); - if (ret != 0) { - ubcm_log_err("Failed to register ubcm class, ret: %d.\n", ret); - goto unreg_devno; - } - - cdev_init(&cm_ctx->ubcm_cdev, &g_ubcm_ops); - cm_ctx->ubcm_cdev.owner = THIS_MODULE; - - ret = cdev_add(&cm_ctx->ubcm_cdev, cm_ctx->ubcm_devno, 1); - if (ret != 0) { - ubcm_log_err("Failed to add ubcm chrdev, ret: %d.\n", ret); - goto unreg_class; - } - - /* create /dev/ubcm */ - cm_ctx->ubcm_dev = device_create(&g_ubcm_class, NULL, - cm_ctx->ubcm_devno, NULL, - UBCM_DEVICE_NAME); - if (IS_ERR_OR_NULL(cm_ctx->ubcm_dev)) { - ret = -1; - ubcm_log_err("Failed to create ubcm device, ret: %d.\n", - (int)PTR_ERR(cm_ctx->ubcm_dev)); - cm_ctx->ubcm_dev = NULL; - goto del_cdev; - } - - ubcm_log_info("Finish to create ubcm chrdev.\n"); - return 0; -del_cdev: - cdev_del(&cm_ctx->ubcm_cdev); -unreg_class: - class_unregister(&g_ubcm_class); -unreg_devno: - unregister_chrdev_region(cm_ctx->ubcm_devno, 1); - return ret; -} - -static void ubcm_cdev_destroy(void) -{ - struct ubcm_context *cm_ctx = get_ubcm_ctx(); - - device_destroy(&g_ubcm_class, cm_ctx->ubcm_cdev.dev); - cm_ctx->ubcm_dev = NULL; - cdev_del(&cm_ctx->ubcm_cdev); - class_unregister(&g_ubcm_class); - unregister_chrdev_region(cm_ctx->ubcm_devno, 1); -} - int ubcm_init(void) { int ret; @@ -456,24 +375,17 @@ int ubcm_init(void) goto uninit_mad; } - ret = ubcm_cdev_create(); - if (ret != 0) { - ubcm_log_err("Failed to create ubcm chrdev, ret: %d.\n", ret); - goto uninit_base; - } - ret = ubcm_genl_init(); if (ret != 0) { ubcm_log_err("Failed to init ubcm generic netlink, ret: %d.\n", ret); - goto destroy_cdev; + goto uninit_base; } ubcore_register_cm_send_ops(ubmad_ubc_send); - pr_info("ubcm module init success.\n"); + ubcm_log_info("ubcm module init success.\n"); return 0; -destroy_cdev: - ubcm_cdev_destroy(); + uninit_base: ubcm_base_uninit(); uninit_mad: @@ -484,8 +396,7 @@ int ubcm_init(void) void ubcm_uninit(void) { ubcm_genl_uninit(); - 
ubcm_cdev_destroy(); ubcm_base_uninit(); ubmad_uninit(); - pr_info("ubcm module exits.\n"); + ubcm_log_info("ubcm module exits.\n"); } diff --git a/drivers/ub/urma/ubcore/ubcm/ub_cm.h b/drivers/ub/urma/ubcore/ubcm/ub_cm.h index 97f63f70e1e9..3c43d39e7df7 100644 --- a/drivers/ub/urma/ubcore/ubcm/ub_cm.h +++ b/drivers/ub/urma/ubcore/ubcm/ub_cm.h @@ -14,10 +14,7 @@ #include #include - - #include "net/ubcore_cm.h" - #include "ub_mad.h" #include "ubcm_genl.h" diff --git a/drivers/ub/urma/ubcore/ubcm/ub_mad.c b/drivers/ub/urma/ubcore/ubcm/ub_mad.c index 6ea30e66311d..fb479d6993d8 100644 --- a/drivers/ub/urma/ubcore/ubcm/ub_mad.c +++ b/drivers/ub/urma/ubcore/ubcm/ub_mad.c @@ -11,13 +11,12 @@ #include #include - - - #include "ubcore_tp.h" #include "ubcm_log.h" - #include "ub_mad_priv.h" +#include "ub/urma/ubcore_uapi.h" +#include "ub/urma/ubcore_api.h" +#include "ubcore_log.h" // udma jetty id starts from 1 currently #define WK_JETTY_ID_INITIALIZER \ diff --git a/drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h b/drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h index e12b3b79d6ad..a4791f267c33 100644 --- a/drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h +++ b/drivers/ub/urma/ubcore/ubcm/ub_mad_priv.h @@ -13,7 +13,6 @@ #define UB_MAD_PRIV_H #include - #include "ub_mad.h" /* well-known jetty (wk jetty) parameters */ diff --git a/drivers/ub/urma/ubcore/ubcm/ubcm_genl.c b/drivers/ub/urma/ubcore/ubcm/ubcm_genl.c index 0fad31f42646..445a204f9ca8 100644 --- a/drivers/ub/urma/ubcore/ubcm/ubcm_genl.c +++ b/drivers/ub/urma/ubcore/ubcm/ubcm_genl.c @@ -13,9 +13,6 @@ #include #include #include - - - #include "ub_mad.h" #include "ub_cm.h" #include "ubcm_log.h" diff --git a/drivers/ub/urma/ubcore/ubcm/ubcm_genl.h b/drivers/ub/urma/ubcore/ubcm/ubcm_genl.h index 3e69e6bc941a..d0cd5f791eb6 100644 --- a/drivers/ub/urma/ubcore/ubcm/ubcm_genl.h +++ b/drivers/ub/urma/ubcore/ubcm/ubcm_genl.h @@ -13,9 +13,6 @@ #define UBCM_GENL_H #include - - - #include "ub_mad.h" /* NETLINK_GENERIC related info */ diff --git a/drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c b/drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c index 3038ad13af49..a892fa6a4129 100644 --- a/drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c +++ b/drivers/ub/urma/ubcore/ubcm/ubmad_datapath.c @@ -16,7 +16,7 @@ #include "ubcore_topo_info.h" #include "net/ubcore_cm.h" #include "ubcm_log.h" - +#include "ub/urma/ubcore_uapi.h" #include "ub_mad_priv.h" /** reliable communication **/ diff --git a/drivers/ub/urma/ubcore/ubcore_cdev_file.c b/drivers/ub/urma/ubcore/ubcore_cdev_file.c index a29182704ed2..5e03ba38ba35 100644 --- a/drivers/ub/urma/ubcore/ubcore_cdev_file.c +++ b/drivers/ub/urma/ubcore/ubcore_cdev_file.c @@ -12,10 +12,8 @@ #include #include #include - #include "ub/urma/ubcore_types.h" #include "ub/urma/ubcore_uapi.h" - #include "ubcore_log.h" #include "ubcore_device.h" #include "ubcore_cdev_file.h" diff --git a/drivers/ub/urma/ubcore/ubcore_cmd_tlv.h b/drivers/ub/urma/ubcore/ubcore_cmd_tlv.h index 19d97a208594..5c0ff8832a96 100644 --- a/drivers/ub/urma/ubcore/ubcore_cmd_tlv.h +++ b/drivers/ub/urma/ubcore/ubcore_cmd_tlv.h @@ -14,7 +14,6 @@ #define UBCORE_CMD_TLV_H #include - #include "ubcore_cmd.h" #include "ubcore_uvs_cmd.h" diff --git a/drivers/ub/urma/ubcore/ubcore_connect_adapter.c b/drivers/ub/urma/ubcore/ubcore_connect_adapter.c index c24f964f3e44..b810d4b78a44 100644 --- a/drivers/ub/urma/ubcore/ubcore_connect_adapter.c +++ b/drivers/ub/urma/ubcore/ubcore_connect_adapter.c @@ -9,15 +9,14 @@ * History: 2025-06-19: create file */ -#include "ubcore_connect_adapter.h" - #include - 
+#include "ubcore_log.h" #include "net/ubcore_net.h" #include "net/ubcore_session.h" -#include "ubcore_log.h" #include "ub/urma/ubcore_uapi.h" +#include "ubcore_connect_adapter.h" #include "ubcore_priv.h" +#include "ubcore_hash_table.h" enum msg_create_conn_result { CREATE_CONN_SUCCESS = 0, diff --git a/drivers/ub/urma/ubcore/ubcore_connect_bonding.c b/drivers/ub/urma/ubcore/ubcore_connect_bonding.c index 43c1c82aabf7..120dce42b653 100644 --- a/drivers/ub/urma/ubcore/ubcore_connect_bonding.c +++ b/drivers/ub/urma/ubcore/ubcore_connect_bonding.c @@ -10,11 +10,11 @@ */ #include "ubcore_connect_bonding.h" - #include "net/ubcore_net.h" #include "ubcore_priv.h" #include "ubcore_topo_info.h" #include "ub/urma/ubcore_uapi.h" +#include "ubcore_log.h" #define BONDING_UDATA_BUF_LEN 960 diff --git a/drivers/ub/urma/ubcore/ubcore_device.c b/drivers/ub/urma/ubcore/ubcore_device.c index 667cbe272052..7d9b56f1cb17 100644 --- a/drivers/ub/urma/ubcore/ubcore_device.c +++ b/drivers/ub/urma/ubcore/ubcore_device.c @@ -17,7 +17,6 @@ #include #include "ub/urma/ubcore_uapi.h" #include - #include "ubcore_log.h" #include "ubcore_device.h" #include "ubcore_tp_table.h" diff --git a/drivers/ub/urma/ubcore/ubcore_dp.c b/drivers/ub/urma/ubcore/ubcore_dp.c new file mode 100644 index 000000000000..1f357e0b21bb --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_dp.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * Description: kmod ub data path API + * Author: sunfang + * Create: 2023-05-09 + * Note: + * History: 2023-05-09 + */ +#include "ubcore_log.h" +#include +#include +#include + +int ubcore_post_jetty_send_wr(struct ubcore_jetty *jetty, + struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr) +{ + struct ubcore_ops *dev_ops; + + if (jetty == NULL || jetty->ub_dev == NULL || + jetty->ub_dev->ops == NULL || + jetty->ub_dev->ops->post_jetty_send_wr == NULL || wr == NULL || + bad_wr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jetty->ub_dev->ops; + return dev_ops->post_jetty_send_wr(jetty, wr, bad_wr); +} +EXPORT_SYMBOL(ubcore_post_jetty_send_wr); + +int ubcore_post_jetty_recv_wr(struct ubcore_jetty *jetty, + struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr) +{ + struct ubcore_ops *dev_ops; + + if (jetty == NULL || jetty->ub_dev == NULL || + jetty->ub_dev->ops == NULL || + jetty->ub_dev->ops->post_jetty_recv_wr == NULL || wr == NULL || + bad_wr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jetty->ub_dev->ops; + return dev_ops->post_jetty_recv_wr(jetty, wr, bad_wr); +} +EXPORT_SYMBOL(ubcore_post_jetty_recv_wr); + +int ubcore_post_jfs_wr(struct ubcore_jfs *jfs, struct ubcore_jfs_wr *wr, + struct ubcore_jfs_wr **bad_wr) +{ + struct ubcore_ops *dev_ops; + + if (jfs == NULL || jfs->ub_dev == NULL || jfs->ub_dev->ops == NULL || + jfs->ub_dev->ops->post_jfs_wr == NULL || wr == NULL || + bad_wr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfs->ub_dev->ops; + return dev_ops->post_jfs_wr(jfs, wr, bad_wr); +} +EXPORT_SYMBOL(ubcore_post_jfs_wr); + +int ubcore_post_jfr_wr(struct ubcore_jfr *jfr, struct ubcore_jfr_wr *wr, + struct ubcore_jfr_wr **bad_wr) +{ + struct ubcore_ops *dev_ops; + + if (jfr == NULL || jfr->ub_dev == NULL || jfr->ub_dev->ops == NULL || + jfr->ub_dev->ops->post_jfr_wr == NULL || wr == NULL || + bad_wr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfr->ub_dev->ops; + return dev_ops->post_jfr_wr(jfr, wr, bad_wr); +} +EXPORT_SYMBOL(ubcore_post_jfr_wr); + +int ubcore_poll_jfc(struct ubcore_jfc *jfc, int cr_cnt, struct ubcore_cr *cr) +{ + struct ubcore_ops *dev_ops; + + if (jfc == NULL || jfc->ub_dev == NULL || jfc->ub_dev->ops == NULL || + jfc->ub_dev->ops->poll_jfc == NULL || cr == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfc->ub_dev->ops; + return dev_ops->poll_jfc(jfc, cr_cnt, cr); +} +EXPORT_SYMBOL(ubcore_poll_jfc); + +int ubcore_rearm_jfc(struct ubcore_jfc *jfc, bool solicited_only) +{ + struct ubcore_ops *dev_ops; + + if (jfc == NULL || jfc->ub_dev == NULL || jfc->ub_dev->ops == NULL || + jfc->ub_dev->ops->rearm_jfc == NULL) { + ubcore_log_err("Invalid parameter"); + return -EINVAL; + } + + dev_ops = jfc->ub_dev->ops; + return dev_ops->rearm_jfc(jfc, solicited_only); +} +EXPORT_SYMBOL(ubcore_rearm_jfc); diff --git a/drivers/ub/urma/ubcore/ubcore_genl.c b/drivers/ub/urma/ubcore/ubcore_genl.c index fcca98b8ef44..21e623be6f75 100644 --- a/drivers/ub/urma/ubcore/ubcore_genl.c +++ b/drivers/ub/urma/ubcore/ubcore_genl.c @@ -15,7 +15,6 @@ #include #include #include - #include "ub/urma/ubcore_api.h" #include "ubcore_msg.h" #include "ubcore_cmd.h" diff --git a/drivers/ub/urma/ubcore/ubcore_genl.h b/drivers/ub/urma/ubcore/ubcore_genl.h index 15e793977db8..8b849c9100d5 100644 --- a/drivers/ub/urma/ubcore/ubcore_genl.h +++ b/drivers/ub/urma/ubcore/ubcore_genl.h @@ -13,6 +13,7 
@@ #define UBCORE_GENL_H #include "ub/urma/ubcore_types.h" + int ubcore_genl_init(void) __init; void ubcore_genl_exit(void); diff --git a/drivers/ub/urma/ubcore/ubcore_genl_admin.c b/drivers/ub/urma/ubcore/ubcore_genl_admin.c index 051deb9fb44e..e06eb51b2d41 100644 --- a/drivers/ub/urma/ubcore/ubcore_genl_admin.c +++ b/drivers/ub/urma/ubcore/ubcore_genl_admin.c @@ -25,7 +25,6 @@ #include "ubcore_main.h" #include "ubcore_genl_admin.h" - #define CB_ARGS_DEV_BUF 0 #define CB_ARGS_CMD_TYPE 1 #define CB_ARGS_SART_IDX 2 diff --git a/drivers/ub/urma/ubcore/ubcore_jetty.c b/drivers/ub/urma/ubcore/ubcore_jetty.c index b3d9f35d6a29..23a1a4855c1d 100644 --- a/drivers/ub/urma/ubcore/ubcore_jetty.c +++ b/drivers/ub/urma/ubcore/ubcore_jetty.c @@ -16,7 +16,6 @@ #include #include #include - #include "ubcore_connect_adapter.h" #include "ubcore_connect_bonding.h" #include "ubcore_log.h" @@ -1688,11 +1687,6 @@ int ubcore_unimport_jetty(struct ubcore_tjetty *tjetty) } EXPORT_SYMBOL(ubcore_unimport_jetty); -static inline void ubcore_put_advice(struct ubcore_tp_advice *advice) -{ - ubcore_put_tptable(advice->meta.ht); -} - static int ubcore_inner_bind_ub_jetty(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, struct ubcore_udata *udata) diff --git a/drivers/ub/urma/ubcore/ubcore_netdev.h b/drivers/ub/urma/ubcore/ubcore_netdev.h deleted file mode 100644 index 408069675e08..000000000000 --- a/drivers/ub/urma/ubcore/ubcore_netdev.h +++ /dev/null @@ -1,59 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved. - * - * Description: ubcore netdev head file - * Author: Chen Wen - * Create: 2023-07-14 - * Note: - * History: 2023-07-14: Create file - */ - -#ifndef UBCORE_NETDEV_H -#define UBCORE_NETDEV_H - -#include "ub/urma/ubcore_types.h" - -int ubcore_check_port_state(struct ubcore_device *dev); -void ubcore_fill_port_netdev(struct ubcore_device *dev, struct net_device *ndev, - uint8_t *port_list, uint8_t *port_cnt); - -int ubcore_sip_table_init(struct ubcore_sip_table *sip_table, uint32_t size); -void ubcore_sip_table_uninit(struct ubcore_sip_table *sip_table); - -uint32_t ubcore_sip_idx_alloc(struct ubcore_sip_table *sip_table); -int ubcore_sip_idx_free_without_lock(struct ubcore_sip_table *sip_table, - uint32_t idx); -int ubcore_sip_idx_free(struct ubcore_sip_table *sip_table, uint32_t idx); - -int ubcore_add_sip_entry(struct ubcore_sip_table *sip_table, - struct ubcore_sip_info *sip, uint32_t idx); - -int ubcore_del_net_addr(struct ubcore_device *dev, uint32_t idx); -int ubcore_del_sip_entry_without_lock(struct ubcore_sip_table *sip_table, - uint32_t idx); -int ubcore_del_sip_entry(struct ubcore_sip_table *sip_table, uint32_t idx); -int ubcore_lookup_sip_idx(struct ubcore_sip_table *sip_table, - struct ubcore_sip_info *sip, uint32_t *idx); -int ubcore_update_sip_entry(struct ubcore_sip_table *sip_table, - struct ubcore_sip_info *new_sip, uint32_t *sip_idx, - struct ubcore_sip_info *old_sip); -struct ubcore_device * -ubcore_lookup_mue_by_sip_addr(union ubcore_net_addr_union *addr, - enum ubcore_transport_type type); -int ubcore_notify_uvs_add_sip(struct ubcore_device *dev, - const struct ubcore_sip_info *sip, - uint32_t index); -int ubcore_notify_uvs_del_sip(struct ubcore_device *dev, - const struct ubcore_sip_info *sip, - uint32_t index); - -struct ubcore_sip_info * -ubcore_lookup_sip_info_without_lock(struct ubcore_sip_table *sip_table, - uint32_t idx); -struct ubcore_nlmsg *ubcore_new_sip_req_msg(struct ubcore_device *dev, - 
struct ubcore_sip_info *sip_info, - uint32_t index); - -void ubcore_free_netdev_port_list(struct ubcore_device *dev); -#endif diff --git a/drivers/ub/urma/ubcore/ubcore_netlink.c b/drivers/ub/urma/ubcore/ubcore_netlink.c index 5daaa5463832..2b15faafdf1c 100644 --- a/drivers/ub/urma/ubcore/ubcore_netlink.c +++ b/drivers/ub/urma/ubcore/ubcore_netlink.c @@ -17,7 +17,6 @@ #include "ubcore_tp.h" #include "ubcore_vtp.h" #include "ubcore_priv.h" -#include "ubcore_netdev.h" #include "ubcore_device.h" #include "ubcore_genl_define.h" #include "ubcore_workqueue.h" diff --git a/drivers/ub/urma/ubcore/ubcore_priv.h b/drivers/ub/urma/ubcore/ubcore_priv.h index 0cf80dd1c5fe..991da7c55f31 100644 --- a/drivers/ub/urma/ubcore/ubcore_priv.h +++ b/drivers/ub/urma/ubcore/ubcore_priv.h @@ -156,8 +156,7 @@ void ubcore_set_tp_init_cfg(struct ubcore_tp *tp, struct ubcore_tp_cfg *cfg); struct ubcore_tp *ubcore_create_tp(struct ubcore_device *dev, struct ubcore_tp_cfg *cfg, struct ubcore_udata *udata); -int ubcore_modify_tp(struct ubcore_device *dev, struct ubcore_tp_node *tp_node, - struct ubcore_tp_attr *tp_attr, struct ubcore_udata udata); + void ubcore_update_all_vlan_netaddr(struct ubcore_device *dev, enum ubcore_net_addr_op op); diff --git a/drivers/ub/urma/ubcore/ubcore_segment.c b/drivers/ub/urma/ubcore/ubcore_segment.c index 51576f8a2f49..f3e38e563648 100644 --- a/drivers/ub/urma/ubcore/ubcore_segment.c +++ b/drivers/ub/urma/ubcore/ubcore_segment.c @@ -11,7 +11,6 @@ #include "ubcore_connect_bonding.h" #include "ubcore_log.h" - #include "ubcore_priv.h" #include "ubcore_hash_table.h" #include "ubcore_tp.h" diff --git a/drivers/ub/urma/ubcore/ubcore_topo_info.h b/drivers/ub/urma/ubcore/ubcore_topo_info.h index e8ffe9386589..4f1cc8fcc4a6 100644 --- a/drivers/ub/urma/ubcore/ubcore_topo_info.h +++ b/drivers/ub/urma/ubcore/ubcore_topo_info.h @@ -12,7 +12,7 @@ #ifndef UBCORE_TOPO_INFO_H #define UBCORE_TOPO_INFO_H -#include +#include "ub/urma/ubcore_types.h" #define EID_LEN (16) #define MAX_PORT_NUM (9) diff --git a/drivers/ub/urma/ubcore/ubcore_tp.c b/drivers/ub/urma/ubcore/ubcore_tp.c new file mode 100644 index 000000000000..f1465bffba3b --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_tp.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. 
+ * + * Description: ubcore tp implementation + * Author: Yan Fangfang + * Create: 2022-08-25 + * Note: + * History: 2022-08-25: Create file + */ + +#include +#include "ubcore_priv.h" +#include "ubcore_log.h" + +int ubcore_get_tp_list(struct ubcore_device *dev, struct ubcore_get_tp_cfg *cfg, + uint32_t *tp_cnt, struct ubcore_tp_info *tp_list, + struct ubcore_udata *udata) +{ + int ret; + + if (dev == NULL || dev->ops == NULL || dev->ops->get_tp_list == NULL || + cfg == NULL || tp_cnt == NULL || tp_list == NULL || *tp_cnt == 0) { + ubcore_log_err("Invalid parameter.\n"); + return -EINVAL; + } + + if (ubcore_check_trans_mode_valid(cfg->trans_mode) != true) { + ubcore_log_err("Invalid parameter, trans_mode: %d.\n", + (int)cfg->trans_mode); + return -EINVAL; + } + + ret = dev->ops->get_tp_list(dev, cfg, tp_cnt, tp_list, udata); + if (ret != 0) + ubcore_log_err("Failed to get to list, ret: %d.\n", ret); + + return ret; +} +EXPORT_SYMBOL(ubcore_get_tp_list); + diff --git a/drivers/ub/urma/ubcore/ubcore_tp.h b/drivers/ub/urma/ubcore/ubcore_tp.h index cc8eab6c7b44..b7646510edee 100644 --- a/drivers/ub/urma/ubcore/ubcore_tp.h +++ b/drivers/ub/urma/ubcore/ubcore_tp.h @@ -13,19 +13,6 @@ #define UBCORE_TP_H #include "ub/urma/ubcore_types.h" -#include "ubcore_tp_table.h" -#include "ubcore_netlink.h" - -struct ubcore_tp_meta { - struct ubcore_hash_table *ht; - uint32_t hash; - struct ubcore_tp_key key; -}; - -struct ubcore_tp_advice { - struct ubcore_ta ta; - struct ubcore_tp_meta meta; -}; static inline bool ubcore_have_ops(struct ubcore_device *dev) { @@ -44,37 +31,4 @@ static inline bool ubcore_have_tp_ctrlplane_ops(struct ubcore_device *dev) dev->ops->get_tp_list && dev->ops->active_tp); } -struct ubcore_nlmsg *ubcore_handle_restore_tp_req(struct ubcore_nlmsg *req); - -/* bind tp APIs */ -int ubcore_bind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, - struct ubcore_tp_advice *advice, struct ubcore_udata *udata); -int ubcore_unbind_tp(struct ubcore_jetty *jetty, struct ubcore_tjetty *tjetty, - struct ubcore_tp_advice *advice); - -/* Called when clear tp table */ -int ubcore_destroy_tp(struct ubcore_tp *tp); - -/* restore tp from error state */ -void ubcore_restore_tp(struct ubcore_device *dev, struct ubcore_tp *tp); -int ubcore_restore_tp_error_to_rtr(struct ubcore_device *dev, - struct ubcore_tp *tp, uint32_t rx_psn, - uint32_t tx_psn, uint16_t data_udp_start, - uint16_t ack_udp_start); -int ubcore_restore_tp_error_to_rts(struct ubcore_device *dev, - struct ubcore_tp *tp); -int ubcore_change_tp_to_err(struct ubcore_device *dev, struct ubcore_tp *tp); - -void ubcore_report_tp_suspend(struct ubcore_device *dev, struct ubcore_tp *tp); -void ubcore_report_tp_flush_done(struct ubcore_device *dev, - struct ubcore_tp *tp); - -void ubcore_modify_tp_attr(struct ubcore_tp *tp, struct ubcore_tp_attr *attr, - union ubcore_tp_attr_mask mask); -int ubcore_modify_tp_state_check(struct ubcore_tp *tp, - enum ubcore_tp_state new_state); -void ubcore_tp_get(void *obj); -void ubcore_tp_kref_put(struct ubcore_tp *tp); -void ubcore_put_ta_jetty(struct ubcore_ta *ta); -void ubcore_put_target_ta_jetty(struct ubcore_ta *ta); #endif diff --git a/drivers/ub/urma/ubcore/ubcore_umem.c b/drivers/ub/urma/ubcore/ubcore_umem.c new file mode 100644 index 000000000000..6cde6b2870c9 --- /dev/null +++ b/drivers/ub/urma/ubcore/ubcore_umem.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. 
+ * + * Description: ubcore device add and remove ops file + * Author: Fan Yizhen + * Create: 2022-07-28 + * Note: + * History: 2022-07-28: create file + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ubcore_log.h" +#include + +static void umem_unpin_pages(struct ubcore_umem *umem, uint64_t nents) +{ + struct scatterlist *sg; + uint32_t i; + + for_each_sg(umem->sg_head.sgl, sg, nents, i) { + struct page *page = NULL; + + if (sg == NULL) { + ubcore_log_err("Invalid sg pointer.\n"); + continue; + } + + page = sg_page(sg); + if (page == NULL) { + ubcore_log_err("Invalid page pointer.\n"); + continue; + } + /* Prevent a large number of concurrent accesses + * from holding spin_lock for too long, causing system reset + */ + cond_resched(); + unpin_user_page(page); + } + sg_free_table(&umem->sg_head); +} + +static void umem_free_sgt(struct ubcore_umem *umem) +{ + umem_unpin_pages(umem, umem->sg_head.nents); +} + +static inline uint64_t umem_cal_npages(uint64_t va, uint64_t len) +{ + return (ALIGN(va + len, PAGE_SIZE) - ALIGN_DOWN(va, PAGE_SIZE)) / + PAGE_SIZE; +} + +static int umem_pin_pages(uint64_t cur_base, uint64_t npages, + uint32_t gup_flags, struct page **page_list) +{ + int pinned; + + pinned = pin_user_pages_fast(cur_base, + min_t(unsigned long, (unsigned long)npages, + PAGE_SIZE / sizeof(struct page *)), + gup_flags | FOLL_LONGTERM, page_list); + + return pinned; +} + +static uint64_t umem_atomic_add(uint64_t npages, struct mm_struct *mm) +{ + uint64_t ret; + + ret = atomic64_add_return(npages, &mm->pinned_vm); + + return ret; +} + +static void umem_atomic_sub(uint64_t npages, struct mm_struct *mm) +{ + atomic64_sub(npages, &mm->pinned_vm); +} + +static struct scatterlist *umem_sg_set_page(struct scatterlist *sg_start, + int pinned, struct page **page_list) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sg_start, sg, pinned, i) { + sg_set_page(sg, page_list[i], PAGE_SIZE, 0); + } + return sg; +} + +static int umem_add_new_pinned(struct ubcore_umem *umem, uint64_t npages) +{ + uint64_t lock_limit; + uint64_t new_pinned; + + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + new_pinned = umem_atomic_add(npages, umem->owning_mm); + if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) { + ubcore_log_err( + "Npages to be pinned is greater than RLIMIT_MEMLOCK[%llu].\n", + lock_limit); + return -ENOMEM; + } + return 0; +} + +static uint64_t umem_pin_all_pages(struct ubcore_umem *umem, uint64_t npages, + uint32_t gup_flags, struct page **page_list) +{ + struct scatterlist *sg_list_start = umem->sg_head.sgl; + uint64_t cur_base = umem->va & PAGE_MASK; + uint64_t page_count = npages; + int pinned; + + while (page_count != 0) { + cond_resched(); + pinned = umem_pin_pages(cur_base, page_count, gup_flags, + page_list); + if (pinned < 0) { + ubcore_log_err( + "Pin pages failed, cur_base: %llx, page_count: %llx, pinned: %d.\n", + cur_base, page_count, pinned); + return npages - page_count; + } + cur_base += (uint64_t)pinned * PAGE_SIZE; + page_count -= (uint64_t)pinned; + sg_list_start = + umem_sg_set_page(sg_list_start, pinned, page_list); + } + return npages; +} + +static int umem_verify_input(struct ubcore_device *ub_dev, uint64_t va, + uint64_t len, union ubcore_umem_flag flag) +{ + if (ub_dev == NULL || ((va + len) < va) || + PAGE_ALIGN(va + len) < (va + len)) { + ubcore_log_err("Invalid parameter, va or len is invalid.\n"); + return -EINVAL; + } + if (flag.bs.non_pin == 1) { + ubcore_log_err("Non-pin mode is not 
supported.\n"); + return -EINVAL; + } + if (can_do_mlock() == 0) + return -EPERM; + return 0; +} + +static int umem_dma_map(struct ubcore_umem *umem, uint64_t npages, + unsigned long dma_attrs) +{ + int ret; + + ret = dma_map_sg_attrs(umem->ub_dev->dma_dev, umem->sg_head.sgl, + (int)npages, DMA_BIDIRECTIONAL, dma_attrs); + if (ret == 0) { + ubcore_log_err("Dma map failed, ret: %d\n", ret); + return -ENOMEM; + } + umem->nmap += (uint32_t)ret; + return 0; +} + +static int ubcore_fill_umem(struct ubcore_umem *umem, struct ubcore_device *dev, + uint64_t va, uint64_t len, + union ubcore_umem_flag flag) +{ + umem->ub_dev = dev; + umem->va = va; + umem->length = len; + umem->flag = flag; + umem->owning_mm = current->mm; + if (!umem->owning_mm) { + ubcore_log_err("mm is null.\n"); + return -EINVAL; + } + mmgrab(umem->owning_mm); + return 0; +} + +static struct ubcore_umem *ubcore_get_target_umem(struct ubcore_device *dev, + uint64_t va, uint64_t len, + union ubcore_umem_flag flag, + struct page **page_list) +{ + /* FOLL_LONGTERM flag added when pin_user_pages_fast called */ + uint32_t gup_flags = (flag.bs.writable == 1) ? FOLL_WRITE : 0; + unsigned long dma_attrs = 0; + struct ubcore_umem *umem; + uint64_t npages; + uint64_t pinned; + int ret = 0; + + umem = kzalloc(sizeof(*umem), GFP_KERNEL); + if (umem == NULL) { + ret = -ENOMEM; + goto out; + } + + ret = ubcore_fill_umem(umem, dev, va, len, flag); + if (ret != 0) { + kfree(umem); + goto out; + } + + npages = umem_cal_npages(umem->va, umem->length); + if (npages == 0 || npages > UINT_MAX) { + ret = -EINVAL; + goto umem_kfree; + } + + ret = umem_add_new_pinned(umem, npages); + if (ret != 0) + goto sub_pinned_vm; + + ret = sg_alloc_table(&umem->sg_head, (unsigned int)npages, GFP_KERNEL); + if (ret != 0) + goto sub_pinned_vm; + + pinned = umem_pin_all_pages(umem, npages, gup_flags, page_list); + if (pinned != npages) { + ret = -ENOMEM; + goto umem_release; + } + + ret = umem_dma_map(umem, npages, dma_attrs); + if (ret != 0) + goto umem_release; + + goto out; + +umem_release: + umem_unpin_pages(umem, pinned); +sub_pinned_vm: + umem_atomic_sub(npages, umem->owning_mm); +umem_kfree: + mmdrop(umem->owning_mm); + kfree(umem); +out: + free_page((unsigned long)page_list); + return ret != 0 ? 
ERR_PTR(ret) : umem; +} + +struct ubcore_umem *ubcore_umem_get(struct ubcore_device *dev, uint64_t va, + uint64_t len, union ubcore_umem_flag flag) +{ + struct page **page_list; + int ret; + + ret = umem_verify_input(dev, va, len, flag); + if (ret < 0) + return ERR_PTR(ret); + + page_list = (struct page **)__get_free_page(GFP_KERNEL); + if (page_list == NULL) + return ERR_PTR(-ENOMEM); + + return ubcore_get_target_umem(dev, va, len, flag, page_list); +} +EXPORT_SYMBOL(ubcore_umem_get); + +void ubcore_umem_release(struct ubcore_umem *umem) +{ + uint64_t npages; + + if (IS_ERR_OR_NULL(umem) || umem->ub_dev == NULL || + umem->owning_mm == NULL) { + ubcore_log_err("Invalid parameter.\n"); + return; + } + + if (((umem->va + umem->length) < umem->va) || + PAGE_ALIGN(umem->va + umem->length) < (umem->va + umem->length)) { + ubcore_log_err("Invalid parameter, va or len is invalid.\n"); + return; + } + + npages = umem_cal_npages(umem->va, umem->length); + dma_unmap_sg(umem->ub_dev->dma_dev, umem->sg_head.sgl, (int)umem->nmap, + DMA_BIDIRECTIONAL); + umem_free_sgt(umem); + umem_atomic_sub(npages, umem->owning_mm); + mmdrop(umem->owning_mm); + kfree(umem); +} +EXPORT_SYMBOL(ubcore_umem_release); + +uint64_t ubcore_umem_find_best_page_size(struct ubcore_umem *umem, + uint64_t page_size_bitmap, uint64_t va) +{ + uint64_t tmp_ps_bitmap; + struct scatterlist *sg; + uint64_t tmp_va, page_off; + dma_addr_t mask; + int i; + + if (IS_ERR_OR_NULL(umem)) { + ubcore_log_err("Invalid parameter.\n"); + return 0; + } + tmp_ps_bitmap = page_size_bitmap & + GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT); + + tmp_va = va; + mask = tmp_ps_bitmap & GENMASK(BITS_PER_LONG - 1, + bits_per((umem->length - 1 + va) ^ va)); + page_off = umem->va & ~PAGE_MASK; + + for_each_sg(umem->sg_head.sgl, sg, umem->sg_head.nents, i) { + mask |= (sg_dma_address(sg) + page_off) ^ tmp_va; + tmp_va += sg_dma_len(sg) - page_off; + if (i != (umem->sg_head.nents - 1)) + mask |= tmp_va; + page_off = 0; + } + + if (mask) + tmp_ps_bitmap &= GENMASK(count_trailing_zeros(mask), 0); + + return tmp_ps_bitmap ? 
rounddown_pow_of_two(tmp_ps_bitmap) : 0; +} +EXPORT_SYMBOL(ubcore_umem_find_best_page_size); diff --git a/drivers/ub/urma/ubcore/ubcore_uvs_cmd.c b/drivers/ub/urma/ubcore/ubcore_uvs_cmd.c index 7d82f4a43b94..4e5ce742355a 100644 --- a/drivers/ub/urma/ubcore/ubcore_uvs_cmd.c +++ b/drivers/ub/urma/ubcore/ubcore_uvs_cmd.c @@ -12,7 +12,6 @@ #include #include #include - #include "ub/urma/ubcore_api.h" #include "ubcore_device.h" #include "ubcore_priv.h" diff --git a/drivers/ub/urma/ubcore/ubcore_uvs_cmd.h b/drivers/ub/urma/ubcore/ubcore_uvs_cmd.h index b274a4cc055a..82a7ddc1bf80 100644 --- a/drivers/ub/urma/ubcore/ubcore_uvs_cmd.h +++ b/drivers/ub/urma/ubcore/ubcore_uvs_cmd.h @@ -14,7 +14,6 @@ #include #include - #include "ubcore_cmd.h" #include "ubcore_log.h" #include "ub/urma/ubcore_types.h" diff --git a/drivers/ub/urma/ubcore/ubcore_workqueue.h b/drivers/ub/urma/ubcore/ubcore_workqueue.h index 3bfee0d2722b..0eb44f0b9a7d 100644 --- a/drivers/ub/urma/ubcore/ubcore_workqueue.h +++ b/drivers/ub/urma/ubcore/ubcore_workqueue.h @@ -14,7 +14,6 @@ #include #include - #include "ub/urma/ubcore_types.h" #define UBCORE_QUEUE_NAME_LEN 32 diff --git a/include/ub/urma/ubcore_types.h b/include/ub/urma/ubcore_types.h index 741752e42a73..15070a50d1de 100644 --- a/include/ub/urma/ubcore_types.h +++ b/include/ub/urma/ubcore_types.h @@ -596,7 +596,7 @@ union ubcore_order_type_cap { uint32_t oi : 1; uint32_t ol : 1; uint32_t no : 1; - uint32_t reserved : 27; + uint32_t reserved : 28; } bs; uint32_t value; }; @@ -615,7 +615,7 @@ union urma_tp_feature { struct { uint32_t rm_multi_path : 1; uint32_t rc_multi_path : 1; - uint32_t reserved : 28; + uint32_t reserved : 30; } bs; uint32_t value; }; -- Gitee From e7f00a991b07d1998a79f23cdb62392335d5e24b Mon Sep 17 00:00:00 2001 From: Yongqiang Guo Date: Mon, 10 Nov 2025 20:48:59 +0800 Subject: [PATCH 5/5] ubagg: implement ubagg basic infrastructure components urma inclusion category: feature bugzilla: https://gitee.com/openeuler/release-management/issues/ID3WJX ---------------------------------------------- This patch introduces the foundational infrastructure for the ubagg (URMA Bonding Aggregation) module, which provides device aggregation capabilities within the URMA framework. The implementation includes core data structures and utility components essential for managing bonded device resources. The ubagg module provides the following core infrastructure components: 1. Resource Management: Implements bitmap-based resource allocation for segments, jetties, JFRs, JFCs, and JFSs, ensuring efficient resource tracking and allocation across aggregated devices. 2. Hash Table Operations: Provides comprehensive hash table management with thread-safe operations for storing and retrieving ubagg objects including segments, jetties, and JFRs. Supports both locked and lock-free operations for different performance requirements. 3. Logging System: Implements a flexible logging framework with configurable log levels and rate-limited output to prevent system overload. Supports emergency, alert, critical, error, warning, notice, info, and debug log levels with proper rate limiting. 4. Jetty Management: Defines interfaces for importing and unimporting JFRs and jetties across aggregated devices, providing the foundation for distributed resource access in bonded device configurations. The infrastructure supports device aggregation with master-slave configurations, allowing up to UBAGG_DEV_MAX_NUM devices to be managed as a single logical entity. 
This enables load balancing and fault tolerance capabilities across multiple underlying URMA devices. Signed-off-by: Yongqiang Guo --- drivers/ub/urma/Makefile | 1 + drivers/ub/urma/ubagg/Makefile | 17 + drivers/ub/urma/ubagg/ubagg_bitmap.c | 136 ++ drivers/ub/urma/ubagg/ubagg_bitmap.h | 42 + drivers/ub/urma/ubagg/ubagg_hash_table.c | 230 +++ drivers/ub/urma/ubagg/ubagg_hash_table.h | 70 + drivers/ub/urma/ubagg/ubagg_ioctl.c | 1817 ++++++++++++++++++++++ drivers/ub/urma/ubagg/ubagg_ioctl.h | 175 +++ drivers/ub/urma/ubagg/ubagg_jetty.c | 77 + drivers/ub/urma/ubagg/ubagg_jetty.h | 29 + drivers/ub/urma/ubagg/ubagg_log.c | 15 + drivers/ub/urma/ubagg/ubagg_log.h | 94 ++ drivers/ub/urma/ubagg/ubagg_main.c | 157 ++ drivers/ub/urma/ubagg/ubagg_seg.c | 129 ++ drivers/ub/urma/ubagg/ubagg_seg.h | 38 + drivers/ub/urma/ubagg/ubagg_topo_info.c | 71 + drivers/ub/urma/ubagg/ubagg_topo_info.h | 52 + drivers/ub/urma/ubagg/ubagg_types.h | 137 ++ 18 files changed, 3287 insertions(+) create mode 100644 drivers/ub/urma/ubagg/Makefile create mode 100644 drivers/ub/urma/ubagg/ubagg_bitmap.c create mode 100644 drivers/ub/urma/ubagg/ubagg_bitmap.h create mode 100644 drivers/ub/urma/ubagg/ubagg_hash_table.c create mode 100644 drivers/ub/urma/ubagg/ubagg_hash_table.h create mode 100644 drivers/ub/urma/ubagg/ubagg_ioctl.c create mode 100644 drivers/ub/urma/ubagg/ubagg_ioctl.h create mode 100644 drivers/ub/urma/ubagg/ubagg_jetty.c create mode 100644 drivers/ub/urma/ubagg/ubagg_jetty.h create mode 100644 drivers/ub/urma/ubagg/ubagg_log.c create mode 100644 drivers/ub/urma/ubagg/ubagg_log.h create mode 100644 drivers/ub/urma/ubagg/ubagg_main.c create mode 100644 drivers/ub/urma/ubagg/ubagg_seg.c create mode 100644 drivers/ub/urma/ubagg/ubagg_seg.h create mode 100644 drivers/ub/urma/ubagg/ubagg_topo_info.c create mode 100644 drivers/ub/urma/ubagg/ubagg_topo_info.h create mode 100644 drivers/ub/urma/ubagg/ubagg_types.h diff --git a/drivers/ub/urma/Makefile b/drivers/ub/urma/Makefile index 6b3fc1efa34d..4f7d1ebdb4a1 100644 --- a/drivers/ub/urma/Makefile +++ b/drivers/ub/urma/Makefile @@ -4,4 +4,5 @@ # obj-$(CONFIG_UB_URMA) += ubcore/ +obj-$(CONFIG_UB_URMA) += ubagg/ diff --git a/drivers/ub/urma/ubagg/Makefile b/drivers/ub/urma/ubagg/Makefile new file mode 100644 index 000000000000..aff596a56dc5 --- /dev/null +++ b/drivers/ub/urma/ubagg/Makefile @@ -0,0 +1,17 @@ + +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux kernel UB device drivers. +# +ccflags-y += -I$(src) + +ubagg-objs := ubagg_bitmap.o \ +ubagg_log.o \ +ubagg_seg.o \ +ubagg_hash_table.o \ +ubagg_ioctl.o \ +ubagg_topo_info.o \ +ubagg_main.o \ +ubagg_jetty.o + +obj-$(CONFIG_UB_URMA) += ubagg.o diff --git a/drivers/ub/urma/ubagg/ubagg_bitmap.c b/drivers/ub/urma/ubagg/ubagg_bitmap.c new file mode 100644 index 000000000000..11378b2ee1c2 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_bitmap.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * Description: ubagg kernel module + * Author: Weicheng Zhang + * Create: 2025-8-6 + * Note: + * History: 2025-8-6: Create file + */ + +#include "ubagg_bitmap.h" +#include "ubagg_log.h" + +struct ubagg_bitmap *ubagg_bitmap_alloc(uint32_t bitmap_size) +{ + struct ubagg_bitmap *bitmap; + + bitmap = kcalloc(1, sizeof(struct ubagg_bitmap), GFP_KERNEL); + if (bitmap == NULL) + return NULL; + bitmap->size = bitmap_size; + bitmap->bits = kcalloc(BITS_TO_LONGS(bitmap_size), + sizeof(unsigned long), GFP_KERNEL); + if (bitmap->bits == NULL) { + kfree(bitmap); + return NULL; + } + bitmap->alloc_idx = 0; + spin_lock_init(&bitmap->lock); + return bitmap; +} + +void ubagg_bitmap_free(struct ubagg_bitmap *bitmap) +{ + spin_lock(&bitmap->lock); + if (bitmap->bits != NULL) + kfree(bitmap->bits); + spin_unlock(&bitmap->lock); + kfree(bitmap); + bitmap = NULL; +} + +int ubagg_bitmap_alloc_idx_from_offset(struct ubagg_bitmap *bitmap, int offset) +{ + int idx; + + if (bitmap == NULL) { + ubagg_log_err("bitmap NULL"); + return -1; + } + spin_lock(&bitmap->lock); + idx = (int)find_next_zero_bit(bitmap->bits, bitmap->size, offset); + if (idx >= bitmap->size || idx < 0) { + spin_unlock(&bitmap->lock); + ubagg_log_err("bitmap allocation failed.\n"); + return -1; + } + + set_bit(idx, bitmap->bits); + spin_unlock(&bitmap->lock); + ubagg_log_info("bitmap allocation success., idx = %d\n", idx); + return idx; +} + +int ubagg_bitmap_alloc_idx_from_offset_nolock(struct ubagg_bitmap *bitmap, + uint64_t offset) +{ + int idx; + + if (bitmap == NULL) { + ubagg_log_err("bitmap NULL"); + return -1; + } + idx = (int)find_next_zero_bit(bitmap->bits, bitmap->size, offset); + if (idx >= bitmap->size || idx < 0) { + ubagg_log_err("bitmap allocation failed.\n"); + return -1; + } + + set_bit(idx, bitmap->bits); + ubagg_log_info("bitmap allocation success., idx = %d\n", idx); + return idx; +} + +int ubagg_bitmap_alloc_idx(struct ubagg_bitmap *bitmap) +{ + int idx; + + if (bitmap == NULL) { + ubagg_log_err("bitmap NULL"); + return -1; + } + spin_lock(&bitmap->lock); + idx = (int)find_first_zero_bit(bitmap->bits, bitmap->size); + if (idx >= bitmap->size || idx < 0) { + spin_unlock(&bitmap->lock); + ubagg_log_err("bitmap allocation failed.\n"); + return -1; + } + set_bit(idx, bitmap->bits); + spin_unlock(&bitmap->lock); + ubagg_log_info("bitmap allocation success., idx = %d\n", idx); + return idx; +} + +int ubagg_bitmap_use_id(struct ubagg_bitmap *bitmap, uint32_t id) +{ + spin_lock(&bitmap->lock); + if (test_bit(id, bitmap->bits) != 0) { + spin_unlock(&bitmap->lock); + ubagg_log_err("Bit %u is already taken.\n", id); + return -1; + } + set_bit(id, bitmap->bits); + spin_unlock(&bitmap->lock); + return 0; +} + +int ubagg_bitmap_free_idx(struct ubagg_bitmap *bitmap, int idx) +{ + spin_lock(&bitmap->lock); + if (idx < 0) { + spin_unlock(&bitmap->lock); + ubagg_log_err("idx invalid, idx:%d.\n", idx); + return -EINVAL; + } + if (test_bit(idx, bitmap->bits) == 0) { + spin_unlock(&bitmap->lock); + ubagg_log_err("idx not set: %d.\n", idx); + return -EINVAL; + } + clear_bit(idx, bitmap->bits); + spin_unlock(&bitmap->lock); + return 0; +} diff --git a/drivers/ub/urma/ubagg/ubagg_bitmap.h b/drivers/ub/urma/ubagg/ubagg_bitmap.h new file mode 100644 index 000000000000..1c86fb1c36b5 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_bitmap.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * Description: ubagg kernel module + * Author: Weicheng Zhang + * Create: 2025-8-6 + * Note: + * History: 2025-8-6: Create file + */ + +#ifndef UBAGG_BITMAP_H +#define UBAGG_BITMAP_H + +#include +#include "ubagg_types.h" + +struct ubagg_bitmap { + unsigned long *bits; + uint32_t size; + spinlock_t lock; + uint64_t alloc_idx; /* Allocated index */ +}; + +#define UBAGG_BITMAP_MAX_SIZE (1 << 16) + +struct ubagg_bitmap *ubagg_bitmap_alloc(uint32_t bitmap_size); + +void ubagg_bitmap_free(struct ubagg_bitmap *bitmap); + +int ubagg_bitmap_alloc_idx(struct ubagg_bitmap *bitmap); + +int ubagg_bitmap_use_id(struct ubagg_bitmap *bitmap, uint32_t id); + +int ubagg_bitmap_free_idx(struct ubagg_bitmap *bitmap, int idx); + +int ubagg_bitmap_alloc_idx_from_offset(struct ubagg_bitmap *bitmap, int offset); + +int ubagg_bitmap_alloc_idx_from_offset_nolock(struct ubagg_bitmap *bitmap, + uint64_t offset); + +#endif diff --git a/drivers/ub/urma/ubagg/ubagg_hash_table.c b/drivers/ub/urma/ubagg/ubagg_hash_table.c new file mode 100644 index 000000000000..ed5c44477a5f --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_hash_table.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. + * + * Description: implement hash table ops + * Author: Yan Fangfang + * Create: 2022-08-03 + * Note: + * History: 2022-08-03 Yan Fangfang Add base code + */ + +#include +#include "ubagg_hash_table.h" + +int ubagg_hash_table_alloc(struct ubagg_hash_table *ht, + struct ubagg_ht_param *p) +{ + uint32_t i; + + if (p == NULL || p->size == 0) + return -1; + + ht->head = kcalloc(p->size, sizeof(struct hlist_head), GFP_KERNEL); + if (ht->head == NULL) + return -ENOMEM; + + ht->p = *p; + for (i = 0; i < p->size; i++) + INIT_HLIST_HEAD(&ht->head[i]); + + spin_lock_init(&ht->lock); + kref_init(&ht->kref); + return 0; +} + +void ubagg_hash_table_free_with_cb(struct ubagg_hash_table *ht, + void (*free_cb)(void *)) +{ + struct hlist_node *pos = NULL, *next = NULL; + struct hlist_head *head; + uint32_t i; + void *obj; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return; + } + for (i = 0; i < ht->p.size; i++) { + hlist_for_each_safe(pos, next, &ht->head[i]) { + obj = ubagg_ht_obj(ht, pos); + hlist_del(pos); + spin_unlock(&ht->lock); + if (free_cb != NULL) + free_cb(obj); + else + kfree(obj); + spin_lock(&ht->lock); + } + } + head = ht->head; + ht->head = NULL; + spin_unlock(&ht->lock); + if (head != NULL) + kfree(head); +} + +void ubagg_hash_table_free(struct ubagg_hash_table *ht) +{ + ubagg_hash_table_free_with_cb(ht, NULL); +} + +void ubagg_hash_table_add_nolock(struct ubagg_hash_table *ht, + struct hlist_node *hnode, uint32_t hash) +{ + INIT_HLIST_NODE(hnode); + hlist_add_head(hnode, &ht->head[hash % ht->p.size]); +} + +void ubagg_hash_table_add(struct ubagg_hash_table *ht, struct hlist_node *hnode, + uint32_t hash) +{ + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return; + } + ubagg_hash_table_add_nolock(ht, hnode, hash); + spin_unlock(&ht->lock); +} + +void ubagg_hash_table_remove_nolock(struct ubagg_hash_table *ht, + struct hlist_node *hnode) +{ + if (ht->head == NULL) + return; + + hlist_del_init(hnode); +} + +void ubagg_hash_table_remove(struct ubagg_hash_table *ht, + struct hlist_node *hnode) +{ + spin_lock(&ht->lock); + ubagg_hash_table_remove_nolock(ht, hnode); + spin_unlock(&ht->lock); +} + +int ubagg_hash_table_check_remove(struct ubagg_hash_table *ht, + struct hlist_node *hnode) +{ 
+ spin_lock(&ht->lock); + if (hlist_unhashed(hnode)) { + spin_unlock(&ht->lock); + return -EINVAL; + } + ubagg_hash_table_remove_nolock(ht, hnode); + spin_unlock(&ht->lock); + return 0; +} + +void *ubagg_hash_table_lookup_nolock_get(struct ubagg_hash_table *ht, + uint32_t hash, const void *key) +{ + struct hlist_node *pos = NULL; + void *obj = NULL; + + hlist_for_each(pos, &ht->head[hash % ht->p.size]) { + obj = ubagg_ht_obj(ht, pos); + if (ht->p.key_size > 0 && + memcmp(ubagg_ht_key(ht, pos), key, ht->p.key_size) == 0) { + break; + } + obj = NULL; + } + + return obj; +} + +void *ubagg_hash_table_lookup_get(struct ubagg_hash_table *ht, uint32_t hash, + const void *key) +{ + void *obj = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + obj = ubagg_hash_table_lookup_nolock_get(ht, hash, key); + + spin_unlock(&ht->lock); + return obj; +} + +void *ubagg_hash_table_lookup_nolock(struct ubagg_hash_table *ht, uint32_t hash, + const void *key) +{ + struct hlist_node *pos = NULL; + void *obj = NULL; + + hlist_for_each(pos, &ht->head[hash % ht->p.size]) { + obj = ubagg_ht_obj(ht, pos); + if (ht->p.key_size > 0 && + memcmp(ubagg_ht_key(ht, pos), key, ht->p.key_size) == 0) { + break; + } + obj = NULL; + } + return obj; +} + +void *ubagg_hash_table_lookup(struct ubagg_hash_table *ht, uint32_t hash, + const void *key) +{ + void *obj = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + obj = ubagg_hash_table_lookup_nolock(ht, hash, key); + spin_unlock(&ht->lock); + return obj; +} + +/* Do not insert a new entry if an old entry with the same key exists */ +int ubagg_hash_table_find_add(struct ubagg_hash_table *ht, + struct hlist_node *hnode, uint32_t hash) +{ + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return -EINVAL; + } + /* Old entry with the same key exists */ + if (ubagg_hash_table_lookup_nolock(ht, hash, ubagg_ht_key(ht, hnode)) != + NULL) { + spin_unlock(&ht->lock); + return -EEXIST; + } + ubagg_hash_table_add_nolock(ht, hnode, hash); + spin_unlock(&ht->lock); + return 0; +} + +void *ubagg_hash_table_find_remove(struct ubagg_hash_table *ht, uint32_t hash, + const void *key) +{ + struct hlist_node *pos = NULL, *next = NULL; + void *obj = NULL; + + spin_lock(&ht->lock); + if (ht->head == NULL) { + spin_unlock(&ht->lock); + return NULL; + } + hlist_for_each_safe(pos, next, &ht->head[hash % ht->p.size]) { + obj = ubagg_ht_obj(ht, pos); + if (ht->p.key_size > 0 && + memcmp(ubagg_ht_key(ht, pos), key, ht->p.key_size) == 0) { + hlist_del(pos); + break; + } + obj = NULL; + } + spin_unlock(&ht->lock); + return obj; +} diff --git a/drivers/ub/urma/ubagg/ubagg_hash_table.h b/drivers/ub/urma/ubagg/ubagg_hash_table.h new file mode 100644 index 000000000000..6679c3213161 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_hash_table.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2025. All rights reserved. 
+ * + * Description: define hash table ops + * Author: Yan Fangfang + * Create: 2022-08-03 + * Note: + * History: 2022-08-03 Yan Fangfang Add base code + */ + +#ifndef UBAGG_HASH_TABLE_H +#define UBAGG_HASH_TABLE_H + +#include "ubagg_types.h" + +static inline void *ubagg_ht_obj(const struct ubagg_hash_table *ht, + const struct hlist_node *hnode) +{ + return (char *)hnode - ht->p.node_offset; +} + +static inline void *ubagg_ht_key(const struct ubagg_hash_table *ht, + const struct hlist_node *hnode) +{ + return ((char *)hnode - ht->p.node_offset) + ht->p.key_offset; +} + +/* Init ht head, not calloc hash table itself */ +int ubagg_hash_table_alloc(struct ubagg_hash_table *ht, + struct ubagg_ht_param *p); +/* Free ht head, not release hash table itself */ +void ubagg_hash_table_free(struct ubagg_hash_table *ht); + +void ubagg_hash_table_free_with_cb(struct ubagg_hash_table *ht, + void (*free_cb)(void *)); + +void ubagg_hash_table_add(struct ubagg_hash_table *ht, struct hlist_node *hnode, + uint32_t hash); + +void ubagg_hash_table_add_nolock(struct ubagg_hash_table *ht, + struct hlist_node *hnode, uint32_t hash); + +void ubagg_hash_table_remove(struct ubagg_hash_table *ht, + struct hlist_node *hnode); + +int ubagg_hash_table_check_remove(struct ubagg_hash_table *ht, + struct hlist_node *hnode); + +void ubagg_hash_table_remove_nolock(struct ubagg_hash_table *ht, + struct hlist_node *hnode); + +void *ubagg_hash_table_lookup(struct ubagg_hash_table *ht, uint32_t hash, + const void *key); + +void *ubagg_hash_table_lookup_nolock(struct ubagg_hash_table *ht, uint32_t hash, + const void *key); + +void *ubagg_hash_table_lookup_get(struct ubagg_hash_table *ht, uint32_t hash, + const void *key); + +void *ubagg_hash_table_lookup_nolock_get(struct ubagg_hash_table *ht, + uint32_t hash, const void *key); + +void *ubagg_hash_table_find_remove(struct ubagg_hash_table *ht, uint32_t hash, + const void *key); +/* Do not insert a new entry if an old entry with the same key exists */ +int ubagg_hash_table_find_add(struct ubagg_hash_table *ht, + struct hlist_node *hnode, uint32_t hash); +#endif diff --git a/drivers/ub/urma/ubagg/ubagg_ioctl.c b/drivers/ub/urma/ubagg/ubagg_ioctl.c new file mode 100644 index 000000000000..c139ef3e3f52 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_ioctl.c @@ -0,0 +1,1817 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * Description: ubagg kernel module + * Author: Dongxu Li + * Create: 2025-1-14 + * Note: + * History: 2025-1-14: Create file + */ +#include +#include +#include +#include + +#include +#include +#include +#include "ubagg_log.h" +#include "ubagg_ioctl.h" +#include "ubagg_jetty.h" +#include "ubagg_seg.h" +#include "ubagg_bitmap.h" +#include "ubagg_hash_table.h" + +#define UBAGG_DEVICE_MAX_EID_CNT 128 +#define UBAGG_MAX_BONDING_DEV_NUM 256 +#define UBAGG_DEV_NAME_PREFIX "bonding_dev_" +#define MAX_NUM_LEN 11 +#define BITMAP_OFFSET 1025 +#define BASE_DECIMAL 10 + +static LIST_HEAD(g_ubagg_dev_list); +static DEFINE_SPINLOCK(g_ubagg_dev_list_lock); + +struct seg_info_req { + struct ubcore_ubva ubva; + uint64_t len; + uint32_t token_id; +}; + +struct jetty_info_req { + struct ubcore_jetty_id jetty_id; + bool is_jfr; +}; + +static struct ubagg_ht_param g_ubagg_ht_params[] = { + [UBAGG_HT_SEGMENT_HT] = { UBAGG_BITMAP_SIZE, + sizeof(struct ubagg_seg_hash_node) - + sizeof(struct hlist_node), + sizeof(struct ubcore_target_seg), + sizeof(uint32_t) }, + [UBAGG_HT_JETTY_HT] = { UBAGG_BITMAP_SIZE, + sizeof(struct ubagg_jetty_hash_node) - + sizeof(struct hlist_node), + sizeof(struct ubcore_jetty), sizeof(uint32_t) }, + [UBAGG_HT_JFR_HT] = { UBAGG_BITMAP_SIZE, + sizeof(struct ubagg_jfr_hash_node) - + sizeof(struct hlist_node), + sizeof(struct ubcore_jfr), sizeof(uint32_t) }, +}; + +static void ubagg_dev_release(struct kref *kref) +{ + struct ubagg_device *dev = container_of(kref, struct ubagg_device, ref); + + kfree(dev); +} + +void ubagg_dev_ref_get(struct ubagg_device *dev) +{ + kref_get(&dev->ref); +} + +void ubagg_dev_ref_put(struct ubagg_device *dev) +{ + kref_put(&dev->ref, ubagg_dev_release); +} + +struct ubagg_dev_name_eid_arr { + char master_dev_name[UBAGG_MAX_DEV_NAME_LEN]; + char bonding_eid[EID_LEN]; +}; +static struct ubagg_dev_name_eid_arr + g_name_eid_arr[UBAGG_MAX_BONDING_DEV_NUM] = { 0 }; +static DEFINE_MUTEX(g_name_eid_arr_lock); + +static bool g_device_id_has_use[UBAGG_MAX_BONDING_DEV_NUM] = { 0 }; +static DEFINE_MUTEX(g_device_id_lock); + +static int find_bond_device_id(void) +{ + int use_id, i; + + mutex_lock(&g_device_id_lock); + for (i = 0; i < UBAGG_MAX_BONDING_DEV_NUM; i++) { + if (g_device_id_has_use[i] == false) { + use_id = i; + g_device_id_has_use[i] = true; + break; + } + } + mutex_unlock(&g_device_id_lock); + if (i == UBAGG_MAX_BONDING_DEV_NUM) { + ubagg_log_err("no free device id.\n"); + return -1; + } + return use_id; +} + +static void release_bond_device_id(int id) +{ + mutex_lock(&g_device_id_lock); + g_device_id_has_use[id] = false; + mutex_unlock(&g_device_id_lock); +} + +static int release_bond_device_id_with_name(const char *str) +{ + const char *underscore_pos; + int id; + int ret; + + if (!str) { + ubagg_log_err("name str is null\n"); + return -EINVAL; + } + + underscore_pos = strrchr(str, '_'); + if (!underscore_pos) { + ubagg_log_err("invalid dev name: %s\n", str); + return -EINVAL; + } + if (underscore_pos[1] == '\0') { + ubagg_log_err("dev name is invalid\n"); + return -EINVAL; + } + ret = kstrtoint(underscore_pos + 1, BASE_DECIMAL, &id); + if (ret) { + ubagg_log_err("str to int failed\n"); + return ret; + } + release_bond_device_id(id); + return 0; +} + +static char *generate_master_dev_name(void) +{ + char *name = NULL; + int cur_id; + int max_length; + + cur_id = find_bond_device_id(); + if (cur_id < 0) { + ubagg_log_err("no free device id.\n"); + return NULL; + } + + max_length = strlen(UBAGG_DEV_NAME_PREFIX) + MAX_NUM_LEN; + name = 
kmalloc_array(max_length, sizeof(char), GFP_KERNEL); + if (name == NULL) { + release_bond_device_id(cur_id); + ubagg_log_err("malloc master dev name failed.\n"); + return NULL; + } + (void)snprintf(name, max_length, "%s%d", UBAGG_DEV_NAME_PREFIX, cur_id); + return name; +} + +static bool ubagg_dev_exists(char *dev_name) +{ + struct ubagg_device *dev; + + list_for_each_entry(dev, &g_ubagg_dev_list, list_node) { + if (strncmp(dev_name, dev->master_dev_name, + UBAGG_MAX_DEV_NAME_LEN) == 0) + return true; + } + return false; +} + +static struct ubagg_device *ubagg_find_dev_by_name(char *dev_name) +{ + struct ubagg_device *dev; + unsigned long flags; + + spin_lock_irqsave(&g_ubagg_dev_list_lock, flags); + list_for_each_entry(dev, &g_ubagg_dev_list, list_node) { + if (strncmp(dev_name, dev->master_dev_name, + UBAGG_MAX_DEV_NAME_LEN) == 0) { + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + return dev; + } + } + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + return NULL; +} + +static struct ubagg_device * +ubagg_find_dev_by_name_and_rmv_from_list(char *dev_name) +{ + struct ubagg_device *dev, *target = NULL; + unsigned long flags; + + spin_lock_irqsave(&g_ubagg_dev_list_lock, flags); + list_for_each_entry(dev, &g_ubagg_dev_list, list_node) { + if (strncmp(dev_name, dev->master_dev_name, + UBAGG_MAX_DEV_NAME_LEN) == 0) { + target = dev; + list_del(&dev->list_node); + ubagg_dev_ref_put(dev); + break; + } + } + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + return target; +} + +static bool get_slave_dev(char *dev_name, struct ubagg_slave_device *slave_dev) +{ + struct ubagg_device *ubagg_dev = ubagg_find_dev_by_name(dev_name); + int i; + + if (ubagg_dev == NULL) { + ubagg_log_err("aggregation device not exist."); + return false; + } + + slave_dev->slave_dev_num = ubagg_dev->slave_dev_num; + for (i = 0; i < ubagg_dev->slave_dev_num; i++) + (void)memcpy(slave_dev->slave_dev_name[i], + ubagg_dev->slave_dev_name[i], + UBAGG_MAX_DEV_NAME_LEN); + return true; +} + +static int ubagg_get_slave_device(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl) +{ + struct ubagg_slave_device slave_dev = { 0 }; + int ret; + + if (!get_slave_dev(dev->dev_name, &slave_dev)) { + ubagg_log_err("ubagg dev not exist:%s", dev->dev_name); + return -ENXIO; + } + + if (user_ctl->out.len < sizeof(struct ubagg_slave_device)) { + ubagg_log_err( + "ubagg user ctl has no enough space, buffer size:%u, needed size:%lu", + user_ctl->out.len, sizeof(struct ubagg_slave_device)); + return -ENOSPC; + } + + ret = copy_to_user((void __user *)user_ctl->out.addr, + (void *)&slave_dev, sizeof(slave_dev)); + if (ret != 0) { + ubagg_log_err("copy to user fail, ret:%d", ret); + return -EFAULT; + } + return 0; +} + +static struct ubagg_topo_info_out *get_topo_info(void) +{ + struct ubagg_topo_info_out *out = NULL; + struct ubagg_topo_map *topo_map = NULL; + + topo_map = get_global_ubagg_map(); + if (topo_map == NULL) + return NULL; + out = kzalloc(sizeof(struct ubagg_topo_info_out), GFP_KERNEL); + if (out == NULL) + return NULL; + (void)memcpy(out->topo_info, topo_map->topo_infos, + sizeof(topo_map->topo_infos)); + out->node_num = topo_map->node_num; + return out; +} + +static int ubagg_get_topo_info(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl) +{ + struct ubagg_topo_info_out *topo_info_out = NULL; + int ret; + + topo_info_out = get_topo_info(); + if (!topo_info_out) { + ubagg_log_err("ubagg dev topo info does not exist:%s", + dev->dev_name); + return -ENXIO; + } + + if (user_ctl->out.len < 
sizeof(struct ubagg_topo_info_out)) { + ubagg_log_err( + "ubagg user ctl has no enough space, buffer size:%u, needed size:%lu", + user_ctl->out.len, sizeof(struct ubagg_topo_info_out)); + kfree(topo_info_out); + return -ENOSPC; + } + + ret = copy_to_user((void __user *)user_ctl->out.addr, + (void *)topo_info_out, + sizeof(struct ubagg_topo_info_out)); + if (ret != 0) { + ubagg_log_err("copy to user fail, ret:%d", ret); + kfree(topo_info_out); + return -EFAULT; + } + kfree(topo_info_out); + return 0; +} + +static int ubagg_get_jfr_id(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl) +{ + struct ubagg_device *ubagg_dev = to_ubagg_dev(dev); + uint32_t id; + int ret; + + if ((ubagg_dev == NULL) || (ubagg_dev->jfr_bitmap == NULL)) { + ubagg_log_err("ubagg_dev->jfr_bitmap NULL"); + return -1; + } + id = ubagg_bitmap_alloc_idx(ubagg_dev->jfr_bitmap); + ret = copy_to_user((void __user *)user_ctl->out.addr, (void *)&id, + sizeof(uint32_t)); + if (ret != 0) { + ubagg_log_err("copy to user fail, ret:%d", ret); + return -EFAULT; + } + return ret; +} + +static int ubagg_get_jetty_id(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl) +{ + struct ubagg_device *ubagg_dev = to_ubagg_dev(dev); + uint32_t id; + int ret; + + if ((ubagg_dev == NULL) || (ubagg_dev->jetty_bitmap == NULL)) { + ubagg_log_err("ubagg_dev->jfr_bitmap NULL"); + return -1; + } + id = ubagg_bitmap_alloc_idx(ubagg_dev->jetty_bitmap); + ret = copy_to_user((void __user *)user_ctl->out.addr, (void *)&id, + sizeof(uint32_t)); + if (ret != 0) { + ubagg_log_err("copy to user fail, ret:%d", ret); + return -EFAULT; + } + return ret; +} + +static int ubagg_get_seg_info(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl) +{ + struct ubagg_device *ubagg_dev = to_ubagg_dev(dev); + struct ubagg_hash_table *ubagg_seg_ht = NULL; + struct ubagg_seg_hash_node *tmp_seg = NULL; + struct seg_info_req *req = NULL; + + if ((ubagg_dev == NULL) || (ubagg_dev->segment_bitmap == NULL)) { + ubagg_log_err("ubagg_dev->segment_bitmap NULL"); + return -1; + } + + if (user_ctl->in.addr != 0 && + user_ctl->in.len != sizeof(struct seg_info_req)) { + ubagg_log_err("Invalid user in"); + return -1; + } + req = (struct seg_info_req *)user_ctl->in.addr; + + ubagg_seg_ht = &ubagg_dev->ubagg_ht[UBAGG_HT_SEGMENT_HT]; + spin_lock(&ubagg_seg_ht->lock); + tmp_seg = ubagg_hash_table_lookup_nolock(ubagg_seg_ht, req->token_id, + &req->token_id); + if (tmp_seg == NULL) { + spin_unlock(&ubagg_seg_ht->lock); + ubagg_log_err("Failed to find seg.\n"); + return -1; + } + memcpy((void *)user_ctl->out.addr, tmp_seg->ex_info.slaves, + sizeof(tmp_seg->ex_info.slaves)); + spin_unlock(&ubagg_seg_ht->lock); + return 0; +} + +static int ubagg_get_jetty_info(struct ubcore_device *dev, + struct ubcore_user_ctl *user_ctl) +{ + struct ubagg_hash_table *ht = NULL; + struct ubagg_device *ubagg_dev = to_ubagg_dev(dev); + struct jetty_info_req *req = NULL; + + if ((ubagg_dev == NULL) || (ubagg_dev->segment_bitmap == NULL)) { + ubagg_log_err("ubagg_dev->segment_bitmap NULL"); + return -1; + } + + if (user_ctl->in.addr != 0 && + user_ctl->in.len != sizeof(struct jetty_info_req)) { + ubagg_log_err("Invalid user in"); + return -1; + } + req = (struct jetty_info_req *)user_ctl->in.addr; + + if (req->is_jfr) { + struct ubagg_jfr_hash_node *tmp_jfr = NULL; + + ht = &ubagg_dev->ubagg_ht[UBAGG_HT_JFR_HT]; + spin_lock(&ht->lock); + tmp_jfr = ubagg_hash_table_lookup_nolock(ht, req->jetty_id.id, + &req->jetty_id.id); + if (tmp_jfr == NULL) { + spin_unlock(&ht->lock); + 
ubagg_log_err("Failed to find jfr, jetty_id:%u.\n", + req->jetty_id.id); + return -1; + } + memcpy((void *)user_ctl->out.addr, &tmp_jfr->ex_info, + sizeof(tmp_jfr->ex_info)); + spin_unlock(&ht->lock); + } else { + struct ubagg_jetty_hash_node *tmp_jetty = NULL; + + ht = &ubagg_dev->ubagg_ht[UBAGG_HT_JETTY_HT]; + spin_lock(&ht->lock); + tmp_jetty = ubagg_hash_table_lookup_nolock(ht, req->jetty_id.id, + &req->jetty_id.id); + if (tmp_jetty == NULL) { + spin_unlock(&ht->lock); + ubagg_log_err("Failed to find jetty, jetty_id:%u.\n", + req->jetty_id.id); + return -1; + } + memcpy((void *)user_ctl->out.addr, &tmp_jetty->ex_info, + sizeof(tmp_jetty->ex_info)); + spin_unlock(&ht->lock); + } + return 0; +} + +int ubagg_user_ctl(struct ubcore_device *dev, struct ubcore_user_ctl *user_ctl) +{ + int ret = 0; + + if (dev == NULL || user_ctl == NULL) { + ubagg_log_err("Invalid parameter.\n"); + return -1; + } + + switch (user_ctl->in.opcode) { + case GET_SLAVE_DEVICE: { + ret = ubagg_get_slave_device(dev, user_ctl); + break; + } + case GET_TOPO_INFO: { + ret = ubagg_get_topo_info(dev, user_ctl); + break; + } + case GET_JFR_ID: { + ret = ubagg_get_jfr_id(dev, user_ctl); + break; + } + case GET_JETTY_ID: { + ret = ubagg_get_jetty_id(dev, user_ctl); + break; + } + case GET_SEG_INFO: { + ret = ubagg_get_seg_info(dev, user_ctl); + break; + } + case GET_JETTY_INFO: { + ret = ubagg_get_jetty_info(dev, user_ctl); + break; + } + default: { + ubagg_log_err("unsupported ubagg userctl opcde:%u", + user_ctl->in.opcode); + ret = -ENXIO; + } + } + + return ret; +} + +int ubagg_config_device(struct ubcore_device *dev, + struct ubcore_device_cfg *cfg) +{ + (void)dev; + (void)cfg; + return 0; +} + +static struct ubcore_ucontext * +ubagg_alloc_ucontext(struct ubcore_device *dev, uint32_t eid_index, + struct ubcore_udrv_priv *udrv_data) +{ + (void)dev; + (void)eid_index; + (void)udrv_data; + return kzalloc(sizeof(struct ubcore_ucontext), GFP_KERNEL); +} + +static int ubagg_free_ucontext(struct ubcore_ucontext *uctx) +{ + kfree(uctx); + return 0; +} + +static int ubagg_query_device_attr(struct ubcore_device *dev, + struct ubcore_device_attr *attr) +{ + *attr = dev->attr; + return 0; +} + +struct ubcore_jfc *ubagg_create_jfc(struct ubcore_device *ub_dev, + struct ubcore_jfc_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_device *ubagg_dev = + ubagg_container_of(ub_dev, struct ubagg_device, ub_dev); + struct ubagg_jfc *jfc; + int id; + + if (ubagg_dev == NULL || ub_dev == NULL || cfg == NULL || + udata == NULL || udata->uctx == NULL) + return NULL; + + jfc = kzalloc(sizeof(struct ubagg_jfc), GFP_KERNEL); + if (jfc == NULL) + return NULL; + jfc->base.jfc_cfg.depth = cfg->depth; + spin_lock(&ubagg_dev->jfc_bitmap->lock); + id = ubagg_bitmap_alloc_idx_from_offset_nolock( + ubagg_dev->jfc_bitmap, ubagg_dev->jfc_bitmap->alloc_idx); + if (id == -1) { + spin_unlock(&ubagg_dev->jfc_bitmap->lock); + ubagg_log_err("failed to alloc jfc_id"); + kfree(jfc); + return NULL; + } + + jfc->base.id = id; + ubagg_dev->jfc_bitmap->alloc_idx = + (jfc->base.id + 1) % UBAGG_BITMAP_MAX_SIZE; + spin_unlock(&ubagg_dev->jfc_bitmap->lock); + ubagg_log_info("ubagg jfc created successfully, id: %u.\n", + jfc->base.id); + return &jfc->base; +} + +int ubagg_destroy_jfc(struct ubcore_jfc *jfc) +{ + struct ubagg_device *ubagg_dev; + struct ubagg_jfc *ubagg_jfc; + int id; + + if (jfc == NULL || jfc->ub_dev == NULL) + return -EINVAL; + ubagg_dev = (struct ubagg_device *)jfc->ub_dev; + ubagg_jfc = ubagg_container_of(jfc, struct ubagg_jfc, base); + + id = 
jfc->id; + (void)ubagg_bitmap_free_idx(ubagg_dev->jfc_bitmap, id); + kfree(ubagg_jfc); + ubagg_log_info("ubagg jfc destroyed successfully, id: %u.\n", id); + return 0; +} + +struct ubcore_jfs *ubagg_create_jfs(struct ubcore_device *ub_dev, + struct ubcore_jfs_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_device *ubagg_dev = + ubagg_container_of(ub_dev, struct ubagg_device, ub_dev); + struct ubagg_jfs *jfs; + int id; + + if (ub_dev == NULL || cfg == NULL || udata == NULL || + udata->uctx == NULL) + return NULL; + spin_lock(&ubagg_dev->jfs_bitmap->lock); + id = ubagg_bitmap_alloc_idx_from_offset_nolock( + ubagg_dev->jfs_bitmap, ubagg_dev->jfs_bitmap->alloc_idx); + if (id == -1) { + spin_unlock(&ubagg_dev->jfs_bitmap->lock); + ubagg_log_err("failed to alloc jfs_id: id has been used up.\n"); + return NULL; + } + ubagg_dev->jfs_bitmap->alloc_idx = (id + 1) % UBAGG_BITMAP_MAX_SIZE; + spin_unlock(&ubagg_dev->jfs_bitmap->lock); + + jfs = kzalloc(sizeof(struct ubagg_jfs), GFP_KERNEL); + if (IS_ERR_OR_NULL(jfs)) { + (void)ubagg_bitmap_free_idx(ubagg_dev->jfs_bitmap, id); + return NULL; + } + + jfs->base.jfs_cfg.depth = cfg->depth; + jfs->base.jfs_cfg.max_sge = cfg->max_sge; + jfs->base.jfs_cfg.max_rsge = cfg->max_rsge; + jfs->base.jfs_cfg.max_inline_data = cfg->max_inline_data; + jfs->base.jfs_cfg.trans_mode = cfg->trans_mode; + jfs->base.jfs_id.id = id; + + ubagg_log_info("ubagg create jfs successfully, id: %u.\n", + jfs->base.jfs_id.id); + return &jfs->base; +} + +int ubagg_destroy_jfs(struct ubcore_jfs *jfs) +{ + struct ubagg_device *ubagg_dev; + struct ubagg_jfs *ubagg_jfs; + int id; + + if (jfs == NULL || jfs->ub_dev == NULL || jfs->uctx == NULL) + return -EINVAL; + ubagg_dev = (struct ubagg_device *)jfs->ub_dev; + ubagg_jfs = ubagg_container_of(jfs, struct ubagg_jfs, base); + id = jfs->jfs_id.id; + (void)ubagg_bitmap_free_idx(ubagg_dev->jfs_bitmap, id); + kfree(ubagg_jfs); + ubagg_log_info("ubagg destroy jfs_ctx successfully, id: %u.\n", id); + return 0; +} + +struct ubcore_jfr *ubagg_create_jfr(struct ubcore_device *ub_dev, + struct ubcore_jfr_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_device *ubagg_dev = + ubagg_container_of(ub_dev, struct ubagg_device, ub_dev); + struct ubagg_hash_table *ubagg_jfr_ht = NULL; + struct ubagg_jfr_hash_node *tmp_jfr = NULL; + struct ubagg_jfr_hash_node *jfr = NULL; + int ret = 0; + int id; + + if (ub_dev == NULL || cfg == NULL || udata == NULL || + udata->uctx == NULL || cfg->id >= UBAGG_BITMAP_MAX_SIZE) + return NULL; + + id = cfg->id; + if (id == 0) { + spin_lock(&ubagg_dev->jfr_bitmap->lock); + id = ubagg_bitmap_alloc_idx_from_offset_nolock( + ubagg_dev->jfr_bitmap, + ubagg_dev->jfr_bitmap->alloc_idx); + if (id == -1) { + spin_unlock(&ubagg_dev->jfr_bitmap->lock); + ubagg_log_err( + "failed to alloc jfr_id: id has been used up.\n"); + return NULL; + } + ubagg_dev->jfr_bitmap->alloc_idx = + (id + 1) % UBAGG_BITMAP_MAX_SIZE == 0 ? 
+ BITMAP_OFFSET : + (id + 1) % UBAGG_BITMAP_MAX_SIZE; + spin_unlock(&ubagg_dev->jfr_bitmap->lock); + } else { + if (ubagg_bitmap_use_id(ubagg_dev->jfr_bitmap, id) != 0) { + ubagg_log_err( + "failed to alloc jfr_id: id has been set.\n"); + return NULL; + } + } + + if (id == -1) { + ubagg_log_err("failed to alloc jfr_id: id has been used up.\n"); + return NULL; + } + + jfr = kzalloc(sizeof(struct ubagg_jfr_hash_node), GFP_KERNEL); + if (jfr == NULL) + goto FREE_ID; + + jfr->base.jfr_cfg.depth = cfg->depth; + jfr->base.jfr_cfg.max_sge = cfg->max_sge; + jfr->base.jfr_id.id = id; + jfr->token_id = id; + + ret = copy_from_user(&jfr->ex_info, + (void __user *)udata->udrv_data->in_addr, + udata->udrv_data->in_len); + if (ret != 0) { + ubagg_log_err("ubagg fail to copy from user, ret:%d.\n", ret); + goto FREE_JFR; + } + jfr->ex_info.base.id = id; + + ubagg_jfr_ht = &ubagg_dev->ubagg_ht[UBAGG_HT_JFR_HT]; + spin_lock(&ubagg_jfr_ht->lock); + tmp_jfr = ubagg_hash_table_lookup_nolock(ubagg_jfr_ht, id, &id); + if (tmp_jfr != NULL) { + ubagg_log_err("id:%u already exists.\n", id); + // should remove it + ubagg_hash_table_remove_nolock(ubagg_jfr_ht, &tmp_jfr->hnode); + spin_unlock(&ubagg_jfr_ht->lock); + kfree(tmp_jfr); + goto FREE_JFR; + } + + ubagg_hash_table_add_nolock(ubagg_jfr_ht, &jfr->hnode, id); + spin_unlock(&ubagg_jfr_ht->lock); + + ubagg_log_info("ubagg create jfr_ctx successfully, id: %u.\n", + jfr->base.jfr_id.id); + return &jfr->base; + +FREE_JFR: + kfree(jfr); +FREE_ID: + (void)ubagg_bitmap_free_idx(ubagg_dev->jfr_bitmap, id); + + ubagg_log_err("ubagg fail to create jfr.\n"); + return NULL; +} + +int ubagg_destroy_jfr(struct ubcore_jfr *jfr) +{ + struct ubagg_device *ubagg_dev; + struct ubagg_jfr_hash_node *ubagg_jfr; + int id; + + if (jfr == NULL || jfr->ub_dev == NULL || jfr->uctx == NULL) + return -EINVAL; + ubagg_dev = (struct ubagg_device *)jfr->ub_dev; + ubagg_jfr = ubagg_container_of(jfr, struct ubagg_jfr_hash_node, base); + id = jfr->jfr_id.id; + ubagg_hash_table_remove(&ubagg_dev->ubagg_ht[UBAGG_HT_JFR_HT], + &ubagg_jfr->hnode); + (void)ubagg_bitmap_free_idx(ubagg_dev->jfr_bitmap, id); + kfree(ubagg_jfr); + ubagg_log_info("ubagg destroy jfr_ctx successfully, id: %u.\n", id); + return 0; +} + +struct ubcore_jetty *ubagg_create_jetty(struct ubcore_device *dev, + struct ubcore_jetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_device *ubagg_dev = + ubagg_container_of(dev, struct ubagg_device, ub_dev); + struct ubagg_hash_table *ubagg_jetty_ht = NULL; + struct ubagg_jetty_hash_node *tmp_jetty = NULL; + struct ubagg_jetty_hash_node *jetty = NULL; + int ret; + int id; + + if (dev == NULL || cfg == NULL || udata == NULL || + cfg->id >= UBAGG_BITMAP_MAX_SIZE) + return NULL; + + id = cfg->id; + if (id == 0) { + spin_lock(&ubagg_dev->jetty_bitmap->lock); + id = ubagg_bitmap_alloc_idx_from_offset_nolock( + ubagg_dev->jetty_bitmap, + ubagg_dev->jetty_bitmap->alloc_idx); + ubagg_log_err("jetty alloc bitmap, idx = %d\n", id); + if (id <= 0) { + spin_unlock(&ubagg_dev->jetty_bitmap->lock); + ubagg_log_err("failed to alloc jetty_id.\n"); + return NULL; + } + ubagg_dev->jetty_bitmap->alloc_idx = + (id + 1) % UBAGG_BITMAP_MAX_SIZE == 0 ? 
+ BITMAP_OFFSET : + (id + 1) % UBAGG_BITMAP_MAX_SIZE; + spin_unlock(&ubagg_dev->jetty_bitmap->lock); + } else { + if (ubagg_bitmap_use_id(ubagg_dev->jetty_bitmap, id) != 0) { + ubagg_log_err( + "failed to alloc jetty_id: id has been set.\n"); + return NULL; + } + } + + if (id == -1) { + ubagg_log_err( + "failed to alloc jetty_id: id has been used up.\n"); + return NULL; + } + + jetty = kzalloc(sizeof(struct ubagg_jetty_hash_node), GFP_KERNEL); + if (jetty == NULL) + goto FREE_ID; + + jetty->base.jetty_cfg = *cfg; + jetty->base.jetty_id.id = id; + jetty->token_id = id; + ret = copy_from_user(&jetty->ex_info, + (void __user *)udata->udrv_data->in_addr, + udata->udrv_data->in_len); + if (ret != 0) { + ubagg_log_err("ubagg fail to copy from user, ret:%d.\n", ret); + goto FREE_JETTY; + } + jetty->ex_info.base.id = id; + + ubagg_jetty_ht = &ubagg_dev->ubagg_ht[UBAGG_HT_JETTY_HT]; + spin_lock(&ubagg_jetty_ht->lock); + tmp_jetty = ubagg_hash_table_lookup_nolock(ubagg_jetty_ht, id, &id); + if (tmp_jetty != NULL) { + ubagg_log_err("id:%u already exists.\n", id); + // should remove it + ubagg_hash_table_remove_nolock(ubagg_jetty_ht, + &tmp_jetty->hnode); + spin_unlock(&ubagg_jetty_ht->lock); + kfree(tmp_jetty); + goto FREE_ID; + } + + ubagg_hash_table_add_nolock(ubagg_jetty_ht, &jetty->hnode, id); + spin_unlock(&ubagg_jetty_ht->lock); + + ubagg_log_info("ubagg create jetty_ctx successfully, jetty_id: %d\n", + jetty->base.jetty_id.id); + return &jetty->base; + +FREE_JETTY: + kfree(jetty); +FREE_ID: + (void)ubagg_bitmap_free_idx(ubagg_dev->jetty_bitmap, id); + + ubagg_log_err("ubagg fail to create jetty_ctx.\n"); + return NULL; +} + +int ubagg_destroy_jetty(struct ubcore_jetty *jetty) +{ + struct ubagg_jetty_hash_node *ubagg_jetty; + struct ubagg_device *ubagg_dev; + int id; + + if (jetty == NULL) + return -EINVAL; + ubagg_dev = (struct ubagg_device *)jetty->ub_dev; + ubagg_jetty = + ubagg_container_of(jetty, struct ubagg_jetty_hash_node, base); + id = jetty->jetty_id.id; + ubagg_hash_table_remove(&ubagg_dev->ubagg_ht[UBAGG_HT_JETTY_HT], + &ubagg_jetty->hnode); + (void)ubagg_bitmap_free_idx(ubagg_dev->jetty_bitmap, id); + kfree(ubagg_jetty); + ubagg_log_info("ubagg destroy jetty successfully, id: %u.\n", id); + return 0; +} + +int ubagg_query_device_status(struct ubcore_device *dev, + struct ubcore_device_status *status) +{ + int i; + + for (i = 0; i < UBCORE_MAX_PORT_CNT; ++i) { + status->port_status[i].state = UBCORE_PORT_ACTIVE; + status->port_status[i].active_mtu = UBCORE_MTU_4096; + status->port_status[i].active_speed = UBCORE_SP_400G; + status->port_status[i].active_width = UBCORE_LINK_X16; + } + return 0; +} + +static struct ubcore_ops g_ubagg_dev_ops = { + .owner = THIS_MODULE, + .driver_name = "ub_agg", + .abi_version = 0, + .user_ctl = ubagg_user_ctl, + .config_device = ubagg_config_device, + .alloc_ucontext = ubagg_alloc_ucontext, + .free_ucontext = ubagg_free_ucontext, + .query_device_attr = ubagg_query_device_attr, + .register_seg = ubagg_register_seg, + .unregister_seg = ubagg_unregister_seg, + .import_seg = ubagg_import_seg, + .unimport_seg = ubagg_unimport_seg, + .create_jfs = ubagg_create_jfs, + .destroy_jfs = ubagg_destroy_jfs, + .create_jfr = ubagg_create_jfr, + .destroy_jfr = ubagg_destroy_jfr, + .create_jetty = ubagg_create_jetty, + .destroy_jetty = ubagg_destroy_jetty, + .create_jfc = ubagg_create_jfc, + .destroy_jfc = ubagg_destroy_jfc, + .import_jfr = ubagg_import_jfr, + .unimport_jfr = ubagg_unimport_jfr, + .import_jetty = ubagg_import_jetty, + .unimport_jetty = 
ubagg_unimport_jetty, + .query_device_status = ubagg_query_device_status, +}; + +static void set_ubagg_device_attr(struct ubcore_device *dev, + struct ubagg_device_cap *dev_cap) +{ + dev->attr.dev_cap.feature = dev_cap->feature; + dev->attr.dev_cap.max_jfc = dev_cap->max_jfc; + dev->attr.dev_cap.max_jfs = dev_cap->max_jfs; + dev->attr.dev_cap.max_jfr = dev_cap->max_jfr; + dev->attr.dev_cap.max_jetty = dev_cap->max_jetty; + dev->attr.dev_cap.max_jetty_grp = dev_cap->max_jetty_grp; + dev->attr.dev_cap.max_jetty_in_jetty_grp = + dev_cap->max_jetty_in_jetty_grp; + dev->attr.dev_cap.max_jfc_depth = dev_cap->max_jfc_depth; + dev->attr.dev_cap.max_jfs_depth = dev_cap->max_jfs_depth; + dev->attr.dev_cap.max_jfr_depth = dev_cap->max_jfr_depth; + dev->attr.dev_cap.max_jfs_inline_size = dev_cap->max_jfs_inline_size; + dev->attr.dev_cap.max_jfs_sge = dev_cap->max_jfs_sge; + dev->attr.dev_cap.max_jfs_rsge = dev_cap->max_jfs_rsge; + dev->attr.dev_cap.max_jfr_sge = dev_cap->max_jfr_sge; + dev->attr.dev_cap.max_msg_size = dev_cap->max_msg_size; + dev->attr.dev_cap.max_read_size = dev_cap->max_read_size; + dev->attr.dev_cap.max_write_size = dev_cap->max_write_size; + dev->attr.dev_cap.max_cas_size = dev_cap->max_cas_size; + dev->attr.dev_cap.max_swap_size = dev_cap->max_swap_size; + dev->attr.dev_cap.max_fetch_and_add_size = + dev_cap->max_fetch_and_add_size; + dev->attr.dev_cap.max_fetch_and_sub_size = + dev_cap->max_fetch_and_sub_size; + dev->attr.dev_cap.max_fetch_and_and_size = + dev_cap->max_fetch_and_and_size; + dev->attr.dev_cap.max_fetch_and_or_size = + dev_cap->max_fetch_and_or_size; + dev->attr.dev_cap.max_fetch_and_xor_size = + dev_cap->max_fetch_and_xor_size; + dev->attr.dev_cap.atomic_feat = dev_cap->atomic_feat; + dev->attr.dev_cap.trans_mode = dev_cap->trans_mode; + dev->attr.dev_cap.sub_trans_mode_cap = dev_cap->sub_trans_mode_cap; + dev->attr.dev_cap.congestion_ctrl_alg = dev_cap->congestion_ctrl_alg; + dev->attr.dev_cap.ceq_cnt = dev_cap->congestion_ctrl_alg; + dev->attr.dev_cap.max_tp_in_tpg = dev_cap->max_tp_in_tpg; + dev->attr.dev_cap.max_eid_cnt = dev_cap->max_eid_cnt; + dev->attr.dev_cap.page_size_cap = dev_cap->page_size_cap; + dev->attr.dev_cap.max_oor_cnt = dev_cap->max_oor_cnt; + dev->attr.dev_cap.mn = dev_cap->mn; + dev->attr.dev_cap.max_netaddr_cnt = dev_cap->max_netaddr_cnt; +} + +static void ubagg_reserve_jetty_id(struct ubagg_device *dev) +{ + if (ubagg_bitmap_alloc_idx(dev->jfs_bitmap) != 0) + ubagg_log_err("Failed to reserve jfs id = 0.\n"); + + if (ubagg_bitmap_alloc_idx(dev->jfr_bitmap) != 0) + ubagg_log_err("Failed to reserve jfr id = 0.\n"); + + if (ubagg_bitmap_alloc_idx(dev->jetty_bitmap) != 0) + ubagg_log_err("Failed to reserve jetty id = 0.\n"); +} + +static int alloc_ubagg_dev_bitmap(struct ubagg_device *ubagg_dev) +{ + ubagg_dev->jfc_bitmap = ubagg_bitmap_alloc(UBAGG_BITMAP_MAX_SIZE); + if (ubagg_dev->jfc_bitmap == NULL) { + ubagg_log_err("failed alloc jfc bitmap.\n"); + return -1; + } + ubagg_dev->jfs_bitmap = ubagg_bitmap_alloc(UBAGG_BITMAP_MAX_SIZE); + if (ubagg_dev->jfs_bitmap == NULL) { + ubagg_log_err("failed alloc jfs bitmap.\n"); + goto free_jfc_bitmap; + } + ubagg_dev->jfr_bitmap = ubagg_bitmap_alloc(UBAGG_BITMAP_MAX_SIZE); + if (ubagg_dev->jfr_bitmap == NULL) { + ubagg_log_err("failed alloc jfr bitmap.\n"); + goto free_jfs_bitmap; + } + ubagg_dev->jfr_bitmap->alloc_idx = BITMAP_OFFSET; + ubagg_dev->jetty_bitmap = ubagg_bitmap_alloc(UBAGG_BITMAP_MAX_SIZE); + if (ubagg_dev->jetty_bitmap == NULL) { + ubagg_log_err("failed alloc jetty bitmap.\n"); + goto 
free_jfr_bitmap; + } + ubagg_dev->jetty_bitmap->alloc_idx = BITMAP_OFFSET; + ubagg_dev->segment_bitmap = ubagg_bitmap_alloc(UBAGG_BITMAP_MAX_SIZE); + if (ubagg_dev->segment_bitmap == NULL) { + ubagg_log_err("failed alloc seg bitmap.\n"); + goto free_jetty_bitmap; + } + ubagg_reserve_jetty_id(ubagg_dev); + + return 0; +free_jetty_bitmap: + if (ubagg_dev->jetty_bitmap != NULL) { + kfree(ubagg_dev->jetty_bitmap); + ubagg_dev->jetty_bitmap = NULL; + } +free_jfr_bitmap: + if (ubagg_dev->jfr_bitmap != NULL) { + kfree(ubagg_dev->jfr_bitmap); + ubagg_dev->jfr_bitmap = NULL; + } +free_jfs_bitmap: + if (ubagg_dev->jfs_bitmap != NULL) { + kfree(ubagg_dev->jfs_bitmap); + ubagg_dev->jfs_bitmap = NULL; + } +free_jfc_bitmap: + if (ubagg_dev->jfc_bitmap != NULL) { + kfree(ubagg_dev->jfc_bitmap); + ubagg_dev->jfc_bitmap = NULL; + } + return -1; +} + +static void free_ubagg_dev_bitmap(struct ubagg_device *ubagg_dev) +{ + ubagg_bitmap_free(ubagg_dev->segment_bitmap); + ubagg_bitmap_free(ubagg_dev->jetty_bitmap); + ubagg_bitmap_free(ubagg_dev->jfr_bitmap); + ubagg_bitmap_free(ubagg_dev->jfs_bitmap); + ubagg_bitmap_free(ubagg_dev->jfc_bitmap); +} + +static struct ubagg_device *ubagg_dev_create(struct ubagg_add_dev *arg) +{ + struct ubagg_device *cur, *ubagg_dev = NULL; + unsigned long flags; + int ret, i; + + if (arg->in.slave_dev_num <= 0 || + arg->in.slave_dev_num > UBAGG_MAX_DEV_NUM) { + ubagg_log_err("slave dev num is invalid, slave_dev_num:%d\n", + arg->in.slave_dev_num); + return NULL; + } + + ubagg_dev = kzalloc(sizeof(struct ubagg_device), GFP_KERNEL); + if (ubagg_dev == NULL) + return NULL; + kref_init(&ubagg_dev->ref); + + // init ubagg device + (void)memcpy(ubagg_dev->master_dev_name, arg->in.master_dev_name, + UBAGG_MAX_DEV_NAME_LEN); + ubagg_dev->slave_dev_num = arg->in.slave_dev_num; + for (i = 0; i < arg->in.slave_dev_num; i++) { + (void)memcpy(ubagg_dev->slave_dev_name[i], + arg->in.slave_dev_name[i], UBAGG_MAX_DEV_NAME_LEN); + } + + // init ubcore_device + (void)memcpy(ubagg_dev->ub_dev.dev_name, arg->in.master_dev_name, + UBAGG_MAX_DEV_NAME_LEN); + ubagg_dev->ub_dev.ops = &g_ubagg_dev_ops; + + ubagg_dev->ub_dev.attr.tp_maintainer = false; + ubagg_dev->ub_dev.attr.dev_cap.max_eid_cnt = UBAGG_DEVICE_MAX_EID_CNT; + set_ubagg_device_attr(&ubagg_dev->ub_dev, &arg->in.dev_attr.dev_cap); + + ret = alloc_ubagg_dev_bitmap(ubagg_dev); + if (ret != 0) { + ubagg_log_err("ubagg alloc bitmap fail\n"); + ubagg_dev_ref_put(ubagg_dev); + return NULL; + } + + ret = ubcore_register_device(&ubagg_dev->ub_dev); + if (ret != 0) { + ubagg_log_err("ubcore register device fail, name:%s\n", + arg->in.master_dev_name); + free_ubagg_dev_bitmap(ubagg_dev); + ubagg_dev_ref_put(ubagg_dev); + return NULL; + } + + ubagg_dev->ub_dev.eid_table.eid_entries[0].eid_index = 0; + ubagg_dev->ub_dev.eid_table.eid_entries[0].net = &init_net; + (void)memcpy(&ubagg_dev->ub_dev.eid_table.eid_entries[0].eid, + &arg->in.eid, UBAGG_EID_SIZE); + ubagg_dev->ub_dev.eid_table.eid_entries[0].valid = true; + + spin_lock_irqsave(&g_ubagg_dev_list_lock, flags); + list_for_each_entry(cur, &g_ubagg_dev_list, list_node) { + if (strncmp(cur->ub_dev.dev_name, arg->in.master_dev_name, + UBAGG_MAX_DEV_NAME_LEN) == 0) { + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + ubagg_log_err("ubagg dev: %s exists in list\n", + arg->in.master_dev_name); + ubcore_unregister_device(&ubagg_dev->ub_dev); + free_ubagg_dev_bitmap(ubagg_dev); + ubagg_dev_ref_put(ubagg_dev); + return NULL; + } + } + list_add_tail(&ubagg_dev->list_node, &g_ubagg_dev_list); + 
spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + ubagg_dev_ref_get(ubagg_dev); + ubagg_log_info("ubagg dev: %s adds to list success\n", + arg->in.master_dev_name); + return ubagg_dev; +} + +static void ubagg_dev_destroy(char *name) +{ + struct ubagg_device *dev = NULL; + unsigned long flags; + bool dev_exist = false; + + spin_lock_irqsave(&g_ubagg_dev_list_lock, flags); + list_for_each_entry(dev, &g_ubagg_dev_list, list_node) { + if (strncmp(dev->ub_dev.dev_name, name, + UBAGG_MAX_DEV_NAME_LEN) == 0) { + dev_exist = true; + list_del(&dev->list_node); + ubagg_dev_ref_put(dev); + break; + } + } + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + + if (!dev_exist) { + ubagg_log_err("ubagg device %s is not exist in list\n", name); + return; + } + + ubcore_unregister_device(&dev->ub_dev); + free_ubagg_dev_bitmap(dev); + ubagg_dev_ref_put(dev); +} + +static int add_dev(struct ubagg_cmd_hdr *hdr) +{ + struct ubagg_device *ubagg_dev; + struct ubagg_add_dev arg; + int ret; + + if (hdr->args_len != sizeof(struct ubagg_add_dev)) { + ubagg_log_err("add bond dev, hdr->args_len:%u is invalid\n", + hdr->args_len); + return -EINVAL; + } + + ret = copy_from_user(&arg, (void __user *)hdr->args_addr, + hdr->args_len); + if (ret != 0) { + ubagg_log_err("copy_from_user fail."); + return ret; + } + + if (ubagg_dev_exists(arg.in.master_dev_name)) { + ubagg_log_err("ubagg dev already exist, name:%s\n", + arg.in.master_dev_name); + return -EEXIST; + } + ubagg_dev = ubagg_dev_create(&arg); + if (ubagg_dev == NULL) { + ubagg_log_err("ubagg dev create fail, name:%s\n", + arg.in.master_dev_name); + return -1; + } + + if (!try_module_get(THIS_MODULE)) { + ubagg_log_err("try_module_get for ubagg fail.\n"); + goto module_get_fail; + } + return 0; + +module_get_fail: + ubagg_dev_destroy(ubagg_dev->master_dev_name); + return -ENODEV; +} + +static int rmv_dev(struct ubagg_cmd_hdr *hdr) +{ + struct ubagg_rmv_dev arg; + struct ubagg_device *ubagg_dev; + int ret; + + if (hdr->args_len != sizeof(struct ubagg_rmv_dev)) { + ubagg_log_err("rmv bond dev, hdr->args_len:%u is invalid\n", + hdr->args_len); + return -EINVAL; + } + + ret = copy_from_user(&arg, (void __user *)hdr->args_addr, + hdr->args_len); + if (ret != 0) { + ubagg_log_err("copy_from_user fail."); + return ret; + } + + ubagg_dev = ubagg_find_dev_by_name_and_rmv_from_list( + arg.in.master_dev_name); + if (ubagg_dev == NULL) { + ubagg_log_err("ubagg dev not exist, name:%s\n", + arg.in.master_dev_name); + return -ENODEV; + } + ubagg_log_info("rmv ubagg dev from list success\n"); + ubcore_unregister_device(&ubagg_dev->ub_dev); + free_ubagg_dev_bitmap(ubagg_dev); + ubagg_dev_ref_put(ubagg_dev); + module_put(THIS_MODULE); + return 0; +} + +static bool is_eid_valid(const char *eid) +{ + int i; + + for (i = 0; i < EID_LEN; i++) { + if (eid[i] != 0) + return true; + } + return false; +} + +static bool is_bonding_and_primary_eid_valid(struct ubagg_topo_map *topo_map) +{ + int i, j; + bool has_primary_eid = false; + + for (i = 0; i < topo_map->node_num; i++) { + if (!is_eid_valid(topo_map->topo_infos[i].bonding_eid)) + return false; + has_primary_eid = false; + for (j = 0; j < IODIE_NUM; j++) { + if (is_eid_valid(topo_map->topo_infos[i] + .io_die_info[j] + .primary_eid)) + has_primary_eid = true; + } + if (!has_primary_eid) + return false; + } + return true; +} + +static int find_cur_node_index(struct ubagg_topo_map *topo_map, + uint32_t *node_index) +{ + int i; + + for (i = 0; i < topo_map->node_num; i++) { + if (topo_map->topo_infos[i].is_cur_node) { + 
*node_index = i; + break; + } + } + if (i == topo_map->node_num) { + ubagg_log_err("can not find cur node index\n"); + return -1; + } + return 0; +} + +static bool compare_eids(const char *eid1, const char *eid2) +{ + return memcmp(eid1, eid2, EID_LEN) == 0; +} + +static int update_peer_port_eid(struct ubagg_topo_info *new_topo_info, + struct ubagg_topo_info *old_topo_info) +{ + int i, j; + char *new_peer_port_eid; + char *old_peer_port_eid; + + for (i = 0; i < IODIE_NUM; i++) { + for (j = 0; j < MAX_PORT_NUM; j++) { + if (!is_eid_valid( + new_topo_info->io_die_info[i].port_eid[j])) + continue; + + new_peer_port_eid = + new_topo_info->io_die_info[i].peer_port_eid[j]; + old_peer_port_eid = + old_topo_info->io_die_info[i].peer_port_eid[j]; + + if (!is_eid_valid(new_peer_port_eid)) + continue; + if (is_eid_valid(old_peer_port_eid) && + !compare_eids(new_peer_port_eid, + old_peer_port_eid)) { + ubagg_log_err( + "peer port eid is not same, new: " EID_FMT + ", old: " EID_FMT "\n", + EID_RAW_ARGS(new_peer_port_eid), + EID_RAW_ARGS(old_peer_port_eid)); + return -1; + } + (void)memcpy(old_peer_port_eid, new_peer_port_eid, + EID_LEN); + } + } + return 0; +} + +static int ubagg_update_topo_info(struct ubagg_topo_map *new_topo_map, + struct ubagg_topo_map *old_topo_map) +{ + struct ubagg_topo_info *new_cur_node_info; + struct ubagg_topo_info *old_cur_node_info; + uint32_t new_cur_node_index = 0; + uint32_t old_cur_node_index = 0; + + if (new_topo_map == NULL || old_topo_map == NULL) { + ubagg_log_err("Invalid topo map\n"); + return -EINVAL; + } + if (!is_bonding_and_primary_eid_valid(new_topo_map)) { + ubagg_log_err("Invalid primary eid\n"); + return -EINVAL; + } + if (find_cur_node_index(new_topo_map, &new_cur_node_index) != 0) { + ubagg_log_err("find cur node index failed in new topo map\n"); + return -1; + } + new_cur_node_info = &(new_topo_map->topo_infos[new_cur_node_index]); + if (find_cur_node_index(old_topo_map, &old_cur_node_index) != 0) { + ubagg_log_err("find cur node index failed in old topo map\n"); + return -1; + } + old_cur_node_info = &(old_topo_map->topo_infos[old_cur_node_index]); + + if (update_peer_port_eid(new_cur_node_info, old_cur_node_info) != 0) { + ubagg_log_err("update peer port eid failed\n"); + return -1; + } + return 0; +} + +static bool has_add_dev_by_bonding_eid(const char *bonding_eid) +{ + int i; + + if (bonding_eid == NULL) { + ubagg_log_err("bonding_eid is NULL"); + return false; + } + mutex_lock(&g_name_eid_arr_lock); + for (i = 0; i < UBAGG_MAX_BONDING_DEV_NUM; i++) { + if (compare_eids(bonding_eid, g_name_eid_arr[i].bonding_eid)) { + mutex_unlock(&g_name_eid_arr_lock); + return true; + } + } + mutex_unlock(&g_name_eid_arr_lock); + return false; +} + +static void fill_add_dev_cfg(struct ubagg_topo_info *topo_info, + struct ubagg_add_dev_by_uvs *arg) +{ + int i, j, k; + + (void)memcpy(&arg->bonding_eid, topo_info->bonding_eid, EID_LEN); + for (i = 0; i < IODIE_NUM; i++) + (void)memcpy(&arg->slave_eid[i].primary_eid, + topo_info->io_die_info[i].primary_eid, EID_LEN); + + for (j = 0; j < IODIE_NUM; j++) { + for (k = 0; k < MAX_PORT_NUM; k++) + (void)memcpy(&arg->slave_eid[j].port_eid[k], + topo_info->io_die_info[j].port_eid[k], + EID_LEN); + } +} + +static void +set_ubagg_device_attr_by_ubcore_cap(struct ubcore_device *dev, + struct ubcore_device_cap *dev_cap) +{ + dev->attr.dev_cap = *dev_cap; +} + +static int init_ubagg_dev(struct ubagg_device *ubagg_dev, + struct ubagg_add_dev_by_uvs *arg) +{ + struct ubcore_device *dev = NULL; + int slave_dev_idx = 0; + int i, j, 
k; + + // init ubagg device + (void)memcpy(ubagg_dev->master_dev_name, arg->master_dev_name, + UBAGG_MAX_DEV_NAME_LEN); + for (i = 0; i < IODIE_NUM; i++) { + if (!is_eid_valid((char *)&arg->slave_eid[i].primary_eid.raw)) + continue; + dev = ubcore_get_device_by_eid(&arg->slave_eid[i].primary_eid, + UBCORE_TRANSPORT_UB); + if (dev == NULL) { + ubagg_log_err( + "primary slave %d dev not exist, eid: " EID_FMT + "\n", + i, EID_ARGS(arg->slave_eid[i].primary_eid)); + return -1; + } + if (slave_dev_idx == 0) + set_ubagg_device_attr_by_ubcore_cap(&ubagg_dev->ub_dev, + &dev->attr.dev_cap); + + (void)memcpy(ubagg_dev->slave_dev_name[slave_dev_idx], + dev->dev_name, UBAGG_MAX_DEV_NAME_LEN); + slave_dev_idx++; + } + + for (j = 0; j < IODIE_NUM; j++) { + for (k = 0; k < MAX_PORT_NUM; k++) { + if (!is_eid_valid( + (char *)&arg->slave_eid[j].port_eid[k].raw)) + continue; + dev = ubcore_get_device_by_eid( + &arg->slave_eid[j].port_eid[k], + UBCORE_TRANSPORT_UB); + if (dev == NULL) { + ubagg_log_err( + "port slave %d_%d dev not exist, eid: " EID_FMT + "\n", + j, k, + EID_ARGS( + arg->slave_eid[j].port_eid[k])); + return -1; + } + if (slave_dev_idx == 0) + set_ubagg_device_attr_by_ubcore_cap( + &ubagg_dev->ub_dev, &dev->attr.dev_cap); + + (void)memcpy(ubagg_dev->slave_dev_name[slave_dev_idx], + dev->dev_name, UBAGG_MAX_DEV_NAME_LEN); + slave_dev_idx++; + } + } + + if (slave_dev_idx == 0) { + ubagg_log_err("slave devs is null\n"); + return -1; + } + + ubagg_dev->slave_dev_num = slave_dev_idx; + return 0; +} + +static int init_ubagg_res(struct ubagg_device *ubagg_dev) +{ + int ret = 0; + int i = 0; + int j = 0; + + ret = alloc_ubagg_dev_bitmap(ubagg_dev); + if (ret != 0) { + ubagg_log_err("ubagg alloc bitmap fail\n"); + return ret; + } + + for (i = 0; i < UBAGG_HT_MAX; i++) { + ret = ubagg_hash_table_alloc(&ubagg_dev->ubagg_ht[i], + &g_ubagg_ht_params[i]); + if (ret != 0) { + ubagg_log_err("Fail to init hash map:%d.\n", i); + goto FREE_HMAP; + } + } + + return 0; + +FREE_HMAP: + for (j = 0; j < i; j++) + ubagg_hash_table_free(&ubagg_dev->ubagg_ht[j]); + free_ubagg_dev_bitmap(ubagg_dev); + + return -ENOMEM; +} + +static void uninit_ubagg_res(struct ubagg_device *ubagg_dev) +{ + int i = 0; + + free_ubagg_dev_bitmap(ubagg_dev); + for (i = 0; i < UBAGG_HT_MAX; i++) + ubagg_hash_table_free(&ubagg_dev->ubagg_ht[i]); +} + +static int init_ubagg_ubcore_dev(struct ubagg_device *ubagg_dev, + struct ubagg_add_dev_by_uvs *arg) +{ + int ret = 0; + + (void)memcpy(ubagg_dev->ub_dev.dev_name, arg->master_dev_name, + UBAGG_MAX_DEV_NAME_LEN); + ubagg_dev->ub_dev.ops = &g_ubagg_dev_ops; + ubagg_dev->ub_dev.attr.tp_maintainer = false; + ubagg_dev->ub_dev.attr.dev_cap.max_eid_cnt = UBAGG_DEVICE_MAX_EID_CNT; + + ret = ubcore_register_device(&ubagg_dev->ub_dev); + if (ret != 0) { + ubagg_log_err("ubcore register device fail, name:%s\n", + arg->master_dev_name); + free_ubagg_dev_bitmap(ubagg_dev); + ubagg_dev_ref_put(ubagg_dev); + return ret; + } + + ubagg_dev->ub_dev.eid_table.eid_entries[0].eid_index = 0; + ubagg_dev->ub_dev.eid_table.eid_entries[0].net = &init_net; + (void)memcpy(&ubagg_dev->ub_dev.eid_table.eid_entries[0].eid, + &arg->bonding_eid, UBAGG_EID_SIZE); + ubagg_dev->ub_dev.eid_table.eid_entries[0].valid = true; + + return 0; +} + +static int add_dev_to_list(struct ubagg_device *ubagg_dev) +{ + struct ubagg_device *cur = NULL; + unsigned long flags; + + spin_lock_irqsave(&g_ubagg_dev_list_lock, flags); + list_for_each_entry(cur, &g_ubagg_dev_list, list_node) { + if (strncmp(cur->ub_dev.dev_name, 
ubagg_dev->ub_dev.dev_name, + UBAGG_MAX_DEV_NAME_LEN) == 0) { + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + ubcore_unregister_device(&ubagg_dev->ub_dev); + free_ubagg_dev_bitmap(ubagg_dev); + ubagg_dev_ref_put(ubagg_dev); + return -EEXIST; + } + } + list_add_tail(&ubagg_dev->list_node, &g_ubagg_dev_list); + ubagg_dev_ref_get(ubagg_dev); + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + return 0; +} + +static void rmv_dev_from_list(struct ubagg_device *ubagg_dev) +{ + struct ubagg_device *cur = NULL; + unsigned long flags; + + spin_lock_irqsave(&g_ubagg_dev_list_lock, flags); + list_for_each_entry(cur, &g_ubagg_dev_list, list_node) { + if (strncmp(cur->ub_dev.dev_name, ubagg_dev->ub_dev.dev_name, + UBAGG_MAX_DEV_NAME_LEN) == 0) { + list_del(&cur->list_node); + ubagg_dev_ref_put(ubagg_dev); + spin_unlock_irqrestore(&g_ubagg_dev_list_lock, flags); + return; + } + } +} + +static int add_dev_by_uvs(struct ubagg_add_dev_by_uvs *arg) +{ + struct ubagg_device *ubagg_dev = NULL; + + if (ubagg_dev_exists(arg->master_dev_name)) { + ubagg_log_err("ubagg dev already exist, name:%s\n", + arg->master_dev_name); + return -EEXIST; + } + + ubagg_dev = kzalloc(sizeof(struct ubagg_device), GFP_KERNEL); + if (ubagg_dev == NULL) + return -ENOMEM; + kref_init(&ubagg_dev->ref); + + if (init_ubagg_dev(ubagg_dev, arg) != 0) { + ubagg_log_err("init ubagg dev fail, name:%s\n", + arg->master_dev_name); + goto PUT_DEV; + } + + if (init_ubagg_res(ubagg_dev) != 0) { + ubagg_log_err("init ubagg res fail, name:%s\n", + arg->master_dev_name); + goto PUT_DEV; + } + + if (init_ubagg_ubcore_dev(ubagg_dev, arg) != 0) { + ubagg_log_err("init ubagg ubcore fail, name:%s\n", + arg->master_dev_name); + goto UNINIT_UBAGG_RES; + } + + if (add_dev_to_list(ubagg_dev) != 0) { + ubagg_log_err("add dev to list fail, name:%s\n", + arg->master_dev_name); + goto UNINIT_UBCORE_DEV; + } + + if (!try_module_get(THIS_MODULE)) { + ubagg_log_err("try_module_get for ubagg fail.\n"); + goto REMOVE_DEV_LIST; + } + return 0; + +REMOVE_DEV_LIST: + rmv_dev_from_list(ubagg_dev); +UNINIT_UBCORE_DEV: + ubcore_unregister_device(&ubagg_dev->ub_dev); +UNINIT_UBAGG_RES: + uninit_ubagg_res(ubagg_dev); +PUT_DEV: + ubagg_dev_ref_put(ubagg_dev); + + return -ENODEV; +} + +static bool is_eid_empty(const char *eid) +{ + int i; + + for (i = 0; i < EID_LEN; i++) { + if (eid[i] != 0) + return false; + } + return true; +} + +static void find_add_master_dev(const char *bondingEid, const char *name) +{ + int i; + int empty_index = -1; + + mutex_lock(&g_name_eid_arr_lock); + for (i = 0; i < UBAGG_MAX_BONDING_DEV_NUM; i++) { + if (is_eid_empty(g_name_eid_arr[i].bonding_eid)) { + empty_index = i; + break; + } + } + if (empty_index == -1) { + mutex_unlock(&g_name_eid_arr_lock); + ubagg_log_err("g_name_eid_arr is full, max dev num is %d", + UBAGG_MAX_BONDING_DEV_NUM); + return; + } + (void)memcpy(g_name_eid_arr[empty_index].bonding_eid, bondingEid, + EID_LEN); + (void)snprintf(g_name_eid_arr[empty_index].master_dev_name, + UBAGG_MAX_DEV_NAME_LEN, "%s", name); + mutex_unlock(&g_name_eid_arr_lock); +} + +static int ubagg_add_dev_by_uvs(struct ubagg_topo_map *topo_map) +{ + struct ubagg_topo_info *cur_node_info; + struct ubagg_add_dev_by_uvs arg = { 0 }; + char *master_dev_name = NULL; + uint32_t cur_node_index = 0; + + if (find_cur_node_index(topo_map, &cur_node_index) != 0) { + ubagg_log_err("find cur node index failed\n"); + return -1; + } + cur_node_info = &(topo_map->topo_infos[cur_node_index]); + + if 
(has_add_dev_by_bonding_eid(cur_node_info->bonding_eid)) { + ubagg_log_info("has add dev by bonding eid: " EID_FMT "\n", + EID_RAW_ARGS(cur_node_info->bonding_eid)); + return 0; + } + + master_dev_name = generate_master_dev_name(); + if (master_dev_name == NULL) { + ubagg_log_err("generate master dev name failed\n"); + return -1; + } + + (void)snprintf(arg.master_dev_name, UBAGG_MAX_DEV_NAME_LEN, "%s", + master_dev_name); + fill_add_dev_cfg(cur_node_info, &arg); + + if (add_dev_by_uvs(&arg) != 0) { + release_bond_device_id_with_name(master_dev_name); + kfree(master_dev_name); + ubagg_log_err("add ubagg dev by uvs failed\n"); + return -1; + } + find_add_master_dev(cur_node_info->bonding_eid, master_dev_name); + kfree(master_dev_name); + return 0; +} + +static void print_topo_map(struct ubagg_topo_map *topo_map) +{ + int i, j, k; + struct ubagg_topo_info *cur_node_info; + + ubagg_log_info( + "========================== topo map start =============================\n"); + for (i = 0; i < topo_map->node_num; i++) { + cur_node_info = topo_map->topo_infos + i; + if (is_eid_empty(cur_node_info->bonding_eid)) + continue; + + ubagg_log_info( + "===================== node %d start =======================\n", + i); + ubagg_log_info("bonding eid: " EID_FMT "\n", + EID_RAW_ARGS(cur_node_info->bonding_eid)); + for (j = 0; j < IODIE_NUM; j++) { + ubagg_log_info( + "\tprimary eid %d: " EID_FMT "\n", j, + EID_RAW_ARGS(cur_node_info->io_die_info[j] + .primary_eid)); + for (k = 0; k < MAX_PORT_NUM; k++) { + ubagg_log_info( + "\t\tport eid %d: " EID_FMT "\n", k, + EID_RAW_ARGS( + cur_node_info->io_die_info[j] + .port_eid[k])); + ubagg_log_info( + "\t\tpeer_port eid %d: " EID_FMT "\n", + k, + EID_RAW_ARGS( + cur_node_info->io_die_info[j] + .peer_port_eid[k])); + } + } + ubagg_log_info( + "===================== node %d end =======================\n", + i); + } + ubagg_log_info( + "========================== topo map end =============================\n"); +} + +static int ubagg_set_topo_info(struct ubagg_cmd_hdr *hdr) +{ + struct ubagg_set_topo_info arg; + struct ubagg_topo_map *new_topo_map; + struct ubagg_topo_map *topo_map; + int ret; + + if (hdr->args_len != sizeof(struct ubagg_set_topo_info)) { + ubagg_log_err( + "set topo info, args_len is invalid, args_len:%u\n", + hdr->args_len); + return -EINVAL; + } + + ret = copy_from_user(&arg, (void __user *)hdr->args_addr, + hdr->args_len); + if (ret != 0) { + ubagg_log_err("copy_from_user fail."); + return ret; + } + if (arg.in.topo == NULL || arg.in.topo_num == 0 || + arg.in.topo_num > MAX_NODE_NUM) { + ubagg_log_err("Invalid set_topo_info param\n"); + return -EINVAL; + } + topo_map = get_global_ubagg_map(); + if (topo_map == NULL) { + topo_map = create_global_ubagg_topo_map(arg.in.topo, + arg.in.topo_num); + if (topo_map == NULL) { + ubagg_log_err("Failed to create topo map\n"); + return -ENOMEM; + } + if (!is_bonding_and_primary_eid_valid(topo_map)) { + delete_global_ubagg_topo_map(); + ubagg_log_err("Invalid primary eid\n"); + return -EINVAL; + } + } else { + // update topo_map + new_topo_map = create_ubagg_topo_map_from_user(arg.in.topo, + arg.in.topo_num); + if (ubagg_update_topo_info(new_topo_map, topo_map) != 0) { + delete_ubagg_topo_map(new_topo_map); + ubagg_log_err("Failed to update topo info\n"); + return -1; + } + delete_ubagg_topo_map(new_topo_map); + } + + print_topo_map(topo_map); + + if (ubagg_add_dev_by_uvs(topo_map) != 0) { + delete_global_ubagg_topo_map(); + ubagg_log_err("Failed to add dev by uvs\n"); + return -1; + } + return 0; +} + +int 
ubagg_delete_topo_map(void) +{ + delete_global_ubagg_topo_map(); + return 0; +} + +long ubagg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct ubagg_cmd_hdr hdr; + int ret = 0; + + if (cmd != UBAGG_CMD || !capable(CAP_NET_ADMIN)) { + ubagg_log_err("bad ubagg ioctl cmd!"); + return -ENOIOCTLCMD; + } + + ret = copy_from_user(&hdr, (void *)arg, sizeof(struct ubagg_cmd_hdr)); + if (ret != 0) { + ubagg_log_err("copy from user fail, ret:%d", ret); + return -EFAULT; + } + switch (hdr.command) { + case UBAGG_ADD_DEV: + return add_dev(&hdr); + case UBAGG_RMV_DEV: + return rmv_dev(&hdr); + case UBAGG_SET_TOPO_INFO: + return ubagg_set_topo_info(&hdr); + default: + ubagg_log_err("Wrong command type:%u", hdr.command); + return -EINVAL; + } +} diff --git a/drivers/ub/urma/ubagg/ubagg_ioctl.h b/drivers/ub/urma/ubagg/ubagg_ioctl.h new file mode 100644 index 000000000000..6e85f9ab102c --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_ioctl.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * Description: ubagg ioctl head file + * Author: Dongxu Li + * Create: 2025-1-26 + * Note: + * History: 2025-1-26: Create file + */ +#ifndef UBAGG_IOCTL_H +#define UBAGG_IOCTL_H + +#include +#include "ubagg_topo_info.h" +#include "ubagg_types.h" + +enum ubagg_cmd { + UBAGG_ADD_DEV = 1, + UBAGG_RMV_DEV, + UBAGG_SET_TOPO_INFO, +}; + +struct ubagg_cmd_hdr { + uint32_t command; + uint32_t args_len; + uint64_t args_addr; +}; + +#define UBAGG_CMD_MAGIC 'B' +#define UBAGG_CMD _IOWR(UBAGG_CMD_MAGIC, 1, struct ubagg_cmd_hdr) + +#define UBAGG_EID_SIZE (16) + +/** A copy of urma_device_cap. + * This module needs user pass `urma_device_cap` in ioctl, + * but it can't include `urma_types.h` in kmod. + * So we copy this structure. 
+ */ +union ubagg_order_type_cap { + struct { + uint32_t ot : 1; + uint32_t oi : 1; + uint32_t ol : 1; + uint32_t no : 1; + uint32_t reserved : 28; + } bs; + uint32_t value; +}; +union ubagg_tp_type_cap { + struct { + uint32_t rtp : 1; + uint32_t ctp : 1; + uint32_t utp : 1; + uint32_t reserved : 29; + } bs; + uint32_t value; +}; + +union ubagg_tp_feature { + struct { + uint32_t rm_multi_path : 1; + uint32_t rc_multi_path : 1; + uint32_t reserved : 30; + } bs; + uint32_t value; +}; + +struct ubagg_device_cap { + union ubcore_device_feat feature; + uint32_t max_jfc; + uint32_t max_jfs; + uint32_t max_jfr; + uint32_t max_jetty; + uint32_t max_jetty_grp; + uint32_t max_jetty_in_jetty_grp; + uint32_t max_jfc_depth; + uint32_t max_jfs_depth; + uint32_t max_jfr_depth; + uint32_t max_jfs_inline_size; + uint32_t max_jfs_sge; + uint32_t max_jfs_rsge; + uint32_t max_jfr_sge; + uint64_t max_msg_size; + uint32_t max_read_size; + uint32_t max_write_size; + uint32_t max_cas_size; + uint32_t max_swap_size; + uint32_t max_fetch_and_add_size; + uint32_t max_fetch_and_sub_size; + uint32_t max_fetch_and_and_size; + uint32_t max_fetch_and_or_size; + uint32_t max_fetch_and_xor_size; + union ubcore_atomic_feat atomic_feat; + uint16_t trans_mode; /* one or more from ubcore_transport_mode_t */ + uint16_t sub_trans_mode_cap; /* one or more from ubcore_sub_trans_mode_cap */ + uint16_t congestion_ctrl_alg; /* one or more mode from ubcore_congestion_ctrl_alg_t */ + uint32_t ceq_cnt; /* completion vector count */ + uint32_t max_tp_in_tpg; + uint32_t max_eid_cnt; + uint64_t page_size_cap; + uint32_t max_oor_cnt; /* max OOR window size by packet */ + uint32_t mn; + uint32_t max_netaddr_cnt; + union ubagg_order_type_cap rm_order_cap; + union ubagg_order_type_cap rc_order_cap; + union ubagg_tp_type_cap rm_tp_cap; + union ubagg_tp_type_cap rc_tp_cap; + union ubagg_tp_type_cap um_tp_cap; + union ubagg_tp_feature tp_feature; +}; +/** A structure mimicking `urma_device_attr`. + * The field `dev_cap` is the same of that in `urma_device_attr`. 
+ */ +struct ubagg_config_dev_attr { + struct ubagg_device_cap dev_cap; +}; + +struct ubagg_add_dev { + struct { + int slave_dev_num; + char master_dev_name[UBAGG_MAX_DEV_NAME_LEN]; + char slave_dev_name[UBAGG_MAX_DEV_NUM][UBAGG_MAX_DEV_NAME_LEN]; + union ubcore_eid eid; + struct ubagg_config_dev_attr dev_attr; + } in; +}; + +struct ubagg_rmv_dev { + struct { + char master_dev_name[UBAGG_MAX_DEV_NAME_LEN]; + } in; +}; + +struct ubagg_set_topo_info { + struct { + void *topo; + uint32_t topo_num; + } in; +}; + +enum ubagg_userctl_opcode { + GET_SLAVE_DEVICE = 1, + GET_TOPO_INFO = 2, + GET_JFR_ID = 3, + GET_JETTY_ID = 4, + GET_SEG_INFO = 5, + GET_JETTY_INFO = 6, +}; + +struct ubagg_slave_device { + int slave_dev_num; + char slave_dev_name[UBAGG_MAX_DEV_NUM][UBAGG_MAX_DEV_NAME_LEN]; +}; + +struct ubagg_topo_info_out { + struct ubagg_topo_info topo_info[MAX_NODE_NUM]; + uint32_t node_num; +}; + +int ubagg_delete_topo_map(void); + +struct ubagg_primary_port_eid { + union ubcore_eid primary_eid; + union ubcore_eid port_eid[MAX_PORT_NUM]; +}; + +struct ubagg_add_dev_by_uvs { + char master_dev_name[UBAGG_MAX_DEV_NAME_LEN]; + union ubcore_eid bonding_eid; + struct ubagg_primary_port_eid slave_eid[IODIE_NUM]; +}; + +long ubagg_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +#endif // UBAGG_IOCTL_H diff --git a/drivers/ub/urma/ubagg/ubagg_jetty.c b/drivers/ub/urma/ubagg/ubagg_jetty.c new file mode 100644 index 000000000000..dccf67c1dcf4 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_jetty.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * Description: ubagg jetty ops implementation + * Author: Wang Hang + * Create: 2025-08-13 + * Note: + * History: 2025-08-13: Create file + */ + +#include "ubagg_jetty.h" +#include "ubagg_log.h" + +struct ubagg_target_jetty { + struct ubcore_tjetty base; +}; + +struct ubcore_tjetty *ubagg_import_jfr(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_target_jetty *tjfr; + + if (dev == NULL || cfg == NULL || udata == NULL || udata->uctx == NULL) + return NULL; + + tjfr = kzalloc(sizeof(struct ubagg_target_jetty), GFP_KERNEL); + if (tjfr == NULL) + return NULL; + ubagg_log_info("Import jfr successfully, is:%u.\n", cfg->id.id); + return &tjfr->base; +} + +int ubagg_unimport_jfr(struct ubcore_tjetty *tjfr) +{ + struct ubagg_target_jetty *ubagg_tjfr; + + if (tjfr == NULL || tjfr->ub_dev == NULL || tjfr->uctx == NULL) { + ubagg_log_err("Invalid parameter.\n"); + return -EINVAL; + } + ubagg_tjfr = (struct ubagg_target_jetty *)tjfr; + ubagg_log_info("Unimport jfr successfully, id:%u.\n", + ubagg_tjfr->base.cfg.id.id); + kfree(ubagg_tjfr); + return 0; +} + +struct ubcore_tjetty *ubagg_import_jetty(struct ubcore_device *dev, + struct ubcore_tjetty_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_target_jetty *tjetty; + + if (cfg == NULL || dev == NULL || udata == NULL) + return NULL; + + tjetty = kzalloc(sizeof(struct ubagg_target_jetty), GFP_KERNEL); + if (tjetty == NULL) + return NULL; + ubagg_log_info("Import jetty successfully, %u\n", cfg->id.id); + return &tjetty->base; +} + +int ubagg_unimport_jetty(struct ubcore_tjetty *tjetty) +{ + struct ubagg_target_jetty *ubagg_tjetty; + + if (tjetty == NULL || tjetty->ub_dev == NULL || tjetty->uctx == NULL) + return -EINVAL; + ubagg_tjetty = (struct ubagg_target_jetty *)tjetty; + ubagg_log_info("Unimport jetty successfully, id:%u.\n", + tjetty->cfg.id.id); + 
kfree(ubagg_tjetty);
+	return 0;
+}
diff --git a/drivers/ub/urma/ubagg/ubagg_jetty.h b/drivers/ub/urma/ubagg/ubagg_jetty.h
new file mode 100644
index 000000000000..26af9f928b43
--- /dev/null
+++ b/drivers/ub/urma/ubagg/ubagg_jetty.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ *
+ * Description: ubagg jetty ops header
+ * Author: Weicheng Zhang
+ * Create: 2025-08-13
+ * Note:
+ * History: 2025-08-13: Create file
+ */
+
+#ifndef UBAGG_JETTY_H
+#define UBAGG_JETTY_H
+
+#include "ubagg_types.h"
+
+struct ubcore_tjetty *ubagg_import_jfr(struct ubcore_device *dev,
+				       struct ubcore_tjetty_cfg *cfg,
+				       struct ubcore_udata *udata);
+
+int ubagg_unimport_jfr(struct ubcore_tjetty *tjfr);
+
+struct ubcore_tjetty *ubagg_import_jetty(struct ubcore_device *dev,
+					 struct ubcore_tjetty_cfg *cfg,
+					 struct ubcore_udata *udata);
+
+int ubagg_unimport_jetty(struct ubcore_tjetty *tjetty);
+
+#endif // UBAGG_JETTY_H
diff --git a/drivers/ub/urma/ubagg/ubagg_log.c b/drivers/ub/urma/ubagg/ubagg_log.c
new file mode 100644
index 000000000000..fb9eac7a4477
--- /dev/null
+++ b/drivers/ub/urma/ubagg/ubagg_log.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ *
+ * Description: ubagg log file
+ * Author: Dongxu Li
+ * Create: 2025-1-14
+ * Note:
+ * History: 2025-1-14: Create file
+ */
+
+#include
+#include "ubagg_log.h"
+
+uint32_t g_ubagg_log_level = UBAGG_LOG_LEVEL_WARNING;
diff --git a/drivers/ub/urma/ubagg/ubagg_log.h b/drivers/ub/urma/ubagg/ubagg_log.h
new file mode 100644
index 000000000000..8381ca7b29b1
--- /dev/null
+++ b/drivers/ub/urma/ubagg/ubagg_log.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ *
+ * Description: ubagg log head file
+ * Author: Dongxu Li
+ * Create: 2025-1-14
+ * Note:
+ * History: 2025-1-14: Create file
+ */
+
+#ifndef UBAGG_LOG_H
+#define UBAGG_LOG_H
+
+#include
+#include
+
+enum ubagg_log_level {
+	UBAGG_LOG_LEVEL_EMERG = 0,
+	UBAGG_LOG_LEVEL_ALERT = 1,
+	UBAGG_LOG_LEVEL_CRIT = 2,
+	UBAGG_LOG_LEVEL_ERR = 3,
+	UBAGG_LOG_LEVEL_WARNING = 4,
+	UBAGG_LOG_LEVEL_NOTICE = 5,
+	UBAGG_LOG_LEVEL_INFO = 6,
+	UBAGG_LOG_LEVEL_DEBUG = 7,
+	UBAGG_LOG_LEVEL_MAX = 8,
+};
+
+/* add log head info, "LogTag_UBAGG|function|[line]|" */
+#define UBAGG_LOG_TAG "LogTag_UBAGG"
+#define ubagg_log(l, format, args...) \
+	pr_##l("%s|%s:[%d]|" format, UBAGG_LOG_TAG, __func__, __LINE__, ##args)
+
+#define UBAGG_RATELIMIT_INTERVAL (5 * HZ)
+#define UBAGG_RATELIMIT_BURST 100
+
+extern uint32_t g_ubagg_log_level;
+
+#define ubagg_log_info(...) \
+	do { \
+		if (g_ubagg_log_level >= UBAGG_LOG_LEVEL_INFO) \
+			ubagg_log(info, __VA_ARGS__); \
+	} while (0)
+
+#define ubagg_log_err(...) \
+	do { \
+		if (g_ubagg_log_level >= UBAGG_LOG_LEVEL_ERR) \
+			ubagg_log(err, __VA_ARGS__); \
+	} while (0)
+
+#define ubagg_log_warn(...) \
+	do { \
+		if (g_ubagg_log_level >= UBAGG_LOG_LEVEL_WARNING) \
+			ubagg_log(warn, __VA_ARGS__); \
+	} while (0)
+
+/* No need to rate limit debug logs with printk_ratelimited */
+#define ubagg_log_debug(...) \
+	do { \
+		if (g_ubagg_log_level >= UBAGG_LOG_LEVEL_DEBUG) \
+			ubagg_log(debug, __VA_ARGS__); \
+	} while (0)
+
+/* Rate limited log to avoid soft lockup caused by excessive printk */
+/* Current limit is 100 logs every 5 seconds */
+#define ubagg_log_info_rl(...)
\ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBAGG_RATELIMIT_INTERVAL, \ + UBAGG_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubagg_log_level >= UBAGG_LOG_LEVEL_INFO)) \ + ubagg_log(info, __VA_ARGS__); \ + } while (0) + +#define ubagg_log_err_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBAGG_RATELIMIT_INTERVAL, \ + UBAGG_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubagg_log_level >= UBAGG_LOG_LEVEL_ERR)) \ + ubagg_log(err, __VA_ARGS__); \ + } while (0) + +#define ubagg_log_warn_rl(...) \ + do { \ + static DEFINE_RATELIMIT_STATE(_rs, UBAGG_RATELIMIT_INTERVAL, \ + UBAGG_RATELIMIT_BURST); \ + if ((__ratelimit(&_rs)) && \ + (g_ubagg_log_level >= UBAGG_LOG_LEVEL_WARNING)) \ + ubagg_log(warn, __VA_ARGS__); \ + } while (0) + +#endif /* UBAGG_LOG_H */ diff --git a/drivers/ub/urma/ubagg/ubagg_main.c b/drivers/ub/urma/ubagg/ubagg_main.c new file mode 100644 index 000000000000..0d94e28ee0d5 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_main.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * Description: ubagg kernel module + * Author: Dongxu Li + * Create: 2025-1-14 + * Note: + * History: 2025-1-14: Create file + */ + +#include +#include +#include +#include + +#include "ubagg_log.h" +#include "ubagg_ioctl.h" +#include "ubagg_seg.h" +#include "ubagg_bitmap.h" +#include "ubagg_hash_table.h" + +#define UBAGG_MODULE_NAME "ubagg" +#define UBAGG_DEVNODE_MODE (0666) +#define UBAGG_DEVICE_NAME UBAGG_MODULE_NAME +#define UBAGG_LOG_FILE_PERMISSION (0644) + +module_param(g_ubagg_log_level, uint, UBAGG_LOG_FILE_PERMISSION); +MODULE_PARM_DESC(g_ubagg_log_level, + " 3: ERR, 4: WARNING, 5:NOTICE, 6: INFO, 7: DEBUG"); + +struct ubagg_ctx { + dev_t ubagg_devno; + struct cdev ubagg_cdev; + struct device *ubagg_dev; +}; + +static struct ubagg_ctx g_ubagg_ctx = { 0 }; + +static int ubagg_open(struct inode *i_node, struct file *filp) +{ + return 0; +} + +static int ubagg_close(struct inode *i_node, struct file *filp) +{ + return 0; +} + +static const struct file_operations g_ubagg_ops = { + .owner = THIS_MODULE, + .open = ubagg_open, + .release = ubagg_close, + .unlocked_ioctl = ubagg_ioctl, + .compat_ioctl = ubagg_ioctl, +}; + +static char *ubagg_devnode(const struct device *dev, umode_t *mode) + +{ + if (mode) + *mode = UBAGG_DEVNODE_MODE; + + return kasprintf(GFP_KERNEL, "%s", dev_name(dev)); +} + +static struct class g_ubagg_class = { + .name = UBAGG_MODULE_NAME, + .devnode = ubagg_devnode, +}; + +static int ubagg_cdev_create(void) +{ + int ret; + + ret = alloc_chrdev_region(&g_ubagg_ctx.ubagg_devno, 0, 1, + UBAGG_MODULE_NAME); + if (ret != 0) { + ubagg_log_err("alloc chrdev no for ubagg fail.\n"); + return ret; + } + + /* create /sys/class/ubagg */ + ret = class_register(&g_ubagg_class); + if (ret) { + ubagg_log_err("couldn't create class %s.\n", UBAGG_MODULE_NAME); + goto unregister_devno; + } + + cdev_init(&g_ubagg_ctx.ubagg_cdev, &g_ubagg_ops); + g_ubagg_ctx.ubagg_cdev.owner = THIS_MODULE; + + ret = cdev_add(&g_ubagg_ctx.ubagg_cdev, g_ubagg_ctx.ubagg_devno, 1); + if (ret != 0) { + ubagg_log_err("ubagg chrdev add failed, ret:%d.\n", ret); + goto class_unregister; + } + + /* /dev/ubagg */ + g_ubagg_ctx.ubagg_dev = device_create(&g_ubagg_class, NULL, + g_ubagg_ctx.ubagg_devno, NULL, + UBAGG_DEVICE_NAME); + if (IS_ERR(g_ubagg_ctx.ubagg_dev)) { + ret = (int)PTR_ERR(g_ubagg_ctx.ubagg_dev); + ubagg_log_err("couldn't create device %s, ret:%d.\n", + UBAGG_DEVICE_NAME, ret); + 
g_ubagg_ctx.ubagg_dev = NULL; + goto cdev_del; + } + ubagg_log_info("ubagg cdev,device and class created success.\n"); + + return 0; + +cdev_del: + cdev_del(&g_ubagg_ctx.ubagg_cdev); +class_unregister: + class_unregister(&g_ubagg_class); +unregister_devno: + unregister_chrdev_region(g_ubagg_ctx.ubagg_devno, 1); + + return ret; +} + +static void ubagg_cdev_destroy(void) +{ + device_destroy(&g_ubagg_class, g_ubagg_ctx.ubagg_cdev.dev); + g_ubagg_ctx.ubagg_dev = NULL; + cdev_del(&g_ubagg_ctx.ubagg_cdev); + class_unregister(&g_ubagg_class); + unregister_chrdev_region(g_ubagg_ctx.ubagg_devno, 1); +} + +static int __init ubagg_init(void) +{ + int ret = 0; + + ret = ubagg_cdev_create(); + if (ret != 0) { + ubagg_log_err("create cdev fail."); + return ret; + } + + return 0; +} + +static void __exit ubagg_exit(void) +{ + ubagg_delete_topo_map(); + ubagg_cdev_destroy(); +} + +module_init(ubagg_init); +module_exit(ubagg_exit); + +MODULE_DESCRIPTION("Kernel module for ubus"); +MODULE_AUTHOR("huawei"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ub/urma/ubagg/ubagg_seg.c b/drivers/ub/urma/ubagg/ubagg_seg.c new file mode 100644 index 000000000000..3011a7782404 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_seg.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * Description: ubagg kernel module + * Author: Weicheng Zhang + * Create: 2025-8-6 + * Note: + * History: 2025-8-6: Create file + */ + +#include "ubagg_seg.h" +#include "ubagg_bitmap.h" +#include "ubagg_log.h" + +int ubagg_unregister_seg(struct ubcore_target_seg *seg) +{ + struct ubagg_seg_hash_node *seg_node = NULL; + struct ubagg_device *ubagg_dev = NULL; + + if (!seg || !seg->ub_dev) { + ubagg_log_err("Invalid param.\n"); + return -EINVAL; + } + + ubagg_dev = to_ubagg_dev(seg->ub_dev); + seg_node = (struct ubagg_seg_hash_node *)seg; + + ubagg_hash_table_remove(&ubagg_dev->ubagg_ht[UBAGG_HT_SEGMENT_HT], + &seg_node->hnode); + ubagg_bitmap_free_idx(ubagg_dev->segment_bitmap, seg_node->token_id); + kfree(seg_node); + return 0; +} + +struct ubcore_target_seg *ubagg_register_seg(struct ubcore_device *dev, + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_seg_hash_node *seg_node = NULL; + struct ubagg_seg_hash_node *tmp_seg = NULL; + struct ubagg_hash_table *ubagg_seg_ht = NULL; + struct ubagg_device *ubagg_dev = NULL; + int token_id = -1; + int ret = 0; + + if (!dev || !cfg || !udata) { + ubagg_log_err("Invalid param.\n"); + return ERR_PTR(-EINVAL); + } + + ubagg_dev = to_ubagg_dev(dev); + seg_node = kzalloc(sizeof(struct ubagg_seg_hash_node), GFP_KERNEL); + if (!seg_node) + return ERR_PTR(-ENOMEM); + + token_id = ubagg_bitmap_alloc_idx(ubagg_dev->segment_bitmap); + if (token_id < 0) { + ubagg_log_err("Fail to alloc token id.\n"); + goto FREE_SEG_NODE; + } + + seg_node->ubagg_seg.seg.token_id = token_id; + seg_node->token_id = token_id; + seg_node->ubagg_seg.ub_dev = dev; + ret = copy_from_user(&seg_node->ex_info, + (void __user *)udata->udrv_data->in_addr, + udata->udrv_data->in_len); + if (ret != 0) { + ubagg_log_err("Fail to copy data from user space, ret:%d.\n", + ret); + goto FREE_TOKEN_ID; + } + + ubagg_seg_ht = &ubagg_dev->ubagg_ht[UBAGG_HT_SEGMENT_HT]; + spin_lock(&ubagg_seg_ht->lock); + tmp_seg = ubagg_hash_table_lookup_nolock(ubagg_seg_ht, token_id, + &token_id); + if (tmp_seg != NULL) { + // should remove it + ubagg_hash_table_remove_nolock(ubagg_seg_ht, &tmp_seg->hnode); + spin_unlock(&ubagg_seg_ht->lock); + 
ubagg_log_err("Token id already exists.\n"); + kfree(tmp_seg); + goto FREE_TOKEN_ID; + } + + ubagg_hash_table_add_nolock(ubagg_seg_ht, &seg_node->hnode, token_id); + spin_unlock(&ubagg_seg_ht->lock); + + return &seg_node->ubagg_seg; + +FREE_TOKEN_ID: + ubagg_bitmap_free_idx(ubagg_dev->segment_bitmap, token_id); +FREE_SEG_NODE: + kfree(seg_node); + return ERR_PTR(-EINVAL); +} + +struct ubcore_target_seg *ubagg_import_seg(struct ubcore_device *dev, + struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata) +{ + struct ubagg_device *ubagg_dev = to_ubagg_dev(dev); + struct ubcore_target_seg *tseg; + + if (ubagg_dev == NULL || cfg == NULL || udata == NULL || + udata->uctx == NULL) { + ubagg_log_err("Invalid param"); + return NULL; + } + + tseg = kzalloc(sizeof(struct ubcore_target_seg), GFP_KERNEL); + if (tseg == NULL) + return NULL; + + return tseg; +} + +int ubagg_unimport_seg(struct ubcore_target_seg *tseg) +{ + if (tseg == NULL || tseg->ub_dev == NULL || tseg->uctx == NULL) { + ubagg_log_err("Invalid param"); + return -EINVAL; + } + + kfree(tseg); + return 0; +} diff --git a/drivers/ub/urma/ubagg/ubagg_seg.h b/drivers/ub/urma/ubagg/ubagg_seg.h new file mode 100644 index 000000000000..37c2664b721e --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_seg.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * Description: ubagg kernel module + * Author: Weicheng Zhang + * Create: 2025-8-6 + * Note: + * History: 2025-8-6: Create file + */ + +#ifndef UBAGG_SEG_H +#define UBAGG_SEG_H + +#include "ubagg_types.h" +#include "ubagg_hash_table.h" + +struct ubcore_target_seg *ubagg_register_seg(struct ubcore_device *dev, + struct ubcore_seg_cfg *cfg, + struct ubcore_udata *udata); + +int ubagg_unregister_seg(struct ubcore_target_seg *seg); + +struct ubcore_target_seg *ubagg_import_seg(struct ubcore_device *dev, + struct ubcore_target_seg_cfg *cfg, + struct ubcore_udata *udata); + +int ubagg_unimport_seg(struct ubcore_target_seg *tseg); + +int ubagg_init_seg_bitmap(void); + +int ubagg_init_seg_ht(void); + +void ubagg_uninit_seg_bitmap(void); + +void ubagg_uninit_seg_ht(void); + +#endif // UBAGG_SEG_H diff --git a/drivers/ub/urma/ubagg/ubagg_topo_info.c b/drivers/ub/urma/ubagg/ubagg_topo_info.c new file mode 100644 index 000000000000..a7871a2dd5f8 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_topo_info.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * Description: ubagg topo info file + * Author: Ma Chuan + * Create: 2025-06-07 + * Note: + * History: 2025-06-07 Create file + */ +#include +#include +#include "ubagg_log.h" +#include "ubagg_topo_info.h" + +static struct ubagg_topo_map *g_topo_map; + +struct ubagg_topo_map * +create_global_ubagg_topo_map(struct ubagg_topo_info *topo_infos, + uint32_t node_num) +{ + g_topo_map = create_ubagg_topo_map_from_user(topo_infos, node_num); + return g_topo_map; +} + +void delete_global_ubagg_topo_map(void) +{ + if (g_topo_map == NULL) + return; + delete_ubagg_topo_map(g_topo_map); + g_topo_map = NULL; +} + +struct ubagg_topo_map *get_global_ubagg_map(void) +{ + return g_topo_map; +} + +struct ubagg_topo_map * +create_ubagg_topo_map_from_user(struct ubagg_topo_info *user_topo_infos, + uint32_t node_num) +{ + struct ubagg_topo_map *topo_map = NULL; + int ret = 0; + + if (user_topo_infos == NULL || node_num <= 0 || + node_num > MAX_NODE_NUM) { + ubagg_log_err("Invalid param\n"); + return NULL; + } + topo_map = kzalloc(sizeof(struct ubagg_topo_map), GFP_KERNEL); + if (topo_map == NULL) + return NULL; + ret = copy_from_user(topo_map->topo_infos, + (void __user *)user_topo_infos, + sizeof(struct ubagg_topo_info) * node_num); + if (ret != 0) { + ubagg_log_err("Failed to copy topo infos\n"); + kfree(topo_map); + return NULL; + } + topo_map->node_num = node_num; + return topo_map; +} + +void delete_ubagg_topo_map(struct ubagg_topo_map *topo_map) +{ + if (topo_map == NULL) + return; + kfree(topo_map); +} diff --git a/drivers/ub/urma/ubagg/ubagg_topo_info.h b/drivers/ub/urma/ubagg/ubagg_topo_info.h new file mode 100644 index 000000000000..d22b9b70e2b8 --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_topo_info.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * + * Description: ubagg topo info head file + * Author: Ma Chuan + * Create: 2025-06-07 + * Note: + * History: 2025-06-07 Create file + */ +#ifndef UBAGG_TOPO_INFO_H +#define UBAGG_TOPO_INFO_H + +#include + +#define EID_LEN (16) +#define MAX_PORT_NUM (9) +#define MAX_NODE_NUM (16) +#define IODIE_NUM (2) + +struct ubagg_iodie_info { + char primary_eid[EID_LEN]; + char port_eid[MAX_PORT_NUM][EID_LEN]; + char peer_port_eid[MAX_PORT_NUM][EID_LEN]; + int socket_id; +}; + +struct ubagg_topo_info { + char bonding_eid[EID_LEN]; + struct ubagg_iodie_info io_die_info[IODIE_NUM]; + bool is_cur_node; +}; + +struct ubagg_topo_map { + struct ubagg_topo_info topo_infos[MAX_NODE_NUM]; + uint32_t node_num; +}; + +struct ubagg_topo_map * +create_global_ubagg_topo_map(struct ubagg_topo_info *topo_infos, + uint32_t node_num); + +void delete_global_ubagg_topo_map(void); + +struct ubagg_topo_map *get_global_ubagg_map(void); + +struct ubagg_topo_map * +create_ubagg_topo_map_from_user(struct ubagg_topo_info *topo_infos, + uint32_t node_num); + +void delete_ubagg_topo_map(struct ubagg_topo_map *topo_map); +#endif // UBAGG_TOPO_INFO_H diff --git a/drivers/ub/urma/ubagg/ubagg_types.h b/drivers/ub/urma/ubagg/ubagg_types.h new file mode 100644 index 000000000000..663023f5ac9a --- /dev/null +++ b/drivers/ub/urma/ubagg/ubagg_types.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * + * Description: ubagg kernel module + * Author: Weicheng Zhang + * Create: 2025-8-6 + * Note: + * History: 2025-8-6: Create file + */ + +#ifndef UBAGG_TYPE_H +#define UBAGG_TYPE_H + +#include +#include + +#define UBAGG_DEV_MAX_NUM (20) +#define UBAGG_BITMAP_SIZE (10240) +#define UBAGG_MAX_DEV_NAME_LEN (64) +#define UBAGG_MAX_DEV_NUM (20) +#define ubagg_container_of(ptr, type, member) \ + (((ptr) == NULL) ? NULL : container_of(ptr, type, member)) + +enum ubagg_ht_param_num { + UBAGG_HT_SEGMENT_HT, + UBAGG_HT_JETTY_HT, + UBAGG_HT_JFR_HT, + UBAGG_HT_MAX, +}; + +struct ubagg_ht_param { + uint32_t size; + uint32_t node_offset; /* offset of hlist node in the hash table object */ + uint32_t key_offset; + uint32_t key_size; +}; + +struct ubagg_hash_table { + struct hlist_head *head; + struct ubagg_ht_param p; + spinlock_t lock; + struct kref kref; +}; + +struct ubagg_ubva { + union ubcore_eid eid; + uint32_t uasid; + uint64_t va; +} __packed; + +struct ubagg_seg_info { + struct ubagg_ubva ubva; + uint64_t len; + union ubcore_seg_attr attr; + uint32_t token_id; +}; + +// must be consistent with urma_bond_seg_info_out_t +struct ubagg_seg_exchange_info { + struct ubagg_seg_info base; + struct ubagg_seg_info slaves[UBAGG_DEV_MAX_NUM]; + int dev_num; +}; + +struct ubagg_seg_hash_node { + // ubagg_seg must be first! + struct ubcore_target_seg ubagg_seg; + // unaccessable for ubcore + uint32_t token_id; // key + struct ubagg_seg_exchange_info ex_info; + struct hlist_node hnode; +}; + +struct ubagg_jetty_id { + union ubcore_eid eid; + uint32_t uasid; + uint32_t id; +}; + +struct ubagg_jetty_exchange_info { + struct ubagg_jetty_id base; + struct ubagg_jetty_id slaves[UBAGG_DEV_MAX_NUM]; + int dev_num; + bool is_in_matrix_server; + bool is_multipath; +}; + +struct ubagg_jetty_hash_node { + // base must be first! + struct ubcore_jetty base; + // unaccessable for ubcore + uint32_t token_id; // key + struct ubagg_jetty_exchange_info ex_info; + struct hlist_node hnode; +}; + +struct ubagg_jfr_hash_node { + // base must be first! + struct ubcore_jfr base; + // unaccessable for ubcore + uint32_t token_id; // key + struct ubagg_jetty_exchange_info ex_info; + struct hlist_node hnode; +}; + +struct ubagg_jfc { + struct ubcore_jfc base; +}; + +struct ubagg_jfs { + struct ubcore_jfs base; +}; + +struct ubagg_device { + struct ubcore_device ub_dev; + char master_dev_name[UBAGG_MAX_DEV_NAME_LEN]; + int slave_dev_num; + char slave_dev_name[UBAGG_MAX_DEV_NUM][UBAGG_MAX_DEV_NAME_LEN]; + struct ubagg_hash_table ubagg_ht[UBAGG_HT_MAX]; + struct ubagg_bitmap *segment_bitmap; + struct ubagg_bitmap *jfs_bitmap; + struct ubagg_bitmap *jfr_bitmap; + struct ubagg_bitmap *jfc_bitmap; + struct ubagg_bitmap *jetty_bitmap; + struct list_head list_node; + struct kref ref; +}; + +static inline struct ubagg_device * +to_ubagg_dev(const struct ubcore_device *ub_dev) +{ + return (struct ubagg_device *)ubagg_container_of( + ub_dev, struct ubagg_device, ub_dev); +} + +#endif // UBAGG_TYPE_H -- Gitee
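The ioctl interface introduced in ubagg_ioctl.h is driven through a single UBAGG_CMD request: userspace fills a struct ubagg_cmd_hdr with the command opcode, the size of the command-specific argument struct, and a pointer to that struct, and ubagg_ioctl() dispatches on hdr.command. The sketch below illustrates the UBAGG_ADD_DEV flow from userspace. It is not part of the patch: the header name ubagg_user.h, the device names, and the omitted eid/dev_attr values are assumptions, and the mirrored structures must match the kernel layout exactly, because add_dev() rejects any args_len other than sizeof(struct ubagg_add_dev).

/* Illustrative userspace sketch only, not part of this patch.
 * Assumes the ABI structures and constants from ubagg_ioctl.h have been
 * mirrored into a userspace header, called "ubagg_user.h" here as a
 * placeholder.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ubagg_user.h"	/* hypothetical mirror of ubagg_ioctl.h */

int main(void)
{
	struct ubagg_add_dev arg = { 0 };
	struct ubagg_cmd_hdr hdr = { 0 };
	int fd, ret;

	/* /dev/ubagg is created by ubagg_cdev_create() with mode 0666. */
	fd = open("/dev/ubagg", O_RDWR);
	if (fd < 0) {
		perror("open /dev/ubagg");
		return 1;
	}

	/* Example bonding device over two slaves; names are placeholders. */
	strncpy(arg.in.master_dev_name, "bonding_dev_0",
		UBAGG_MAX_DEV_NAME_LEN - 1);
	arg.in.slave_dev_num = 2;
	strncpy(arg.in.slave_dev_name[0], "ubn0", UBAGG_MAX_DEV_NAME_LEN - 1);
	strncpy(arg.in.slave_dev_name[1], "ubn1", UBAGG_MAX_DEV_NAME_LEN - 1);
	/* arg.in.eid and arg.in.dev_attr would be filled from the real devices. */

	hdr.command = UBAGG_ADD_DEV;
	hdr.args_len = (uint32_t)sizeof(arg);	/* add_dev() rejects any other length */
	hdr.args_addr = (uint64_t)(uintptr_t)&arg;

	ret = ioctl(fd, UBAGG_CMD, &hdr);	/* requires CAP_NET_ADMIN */
	if (ret != 0)
		perror("UBAGG_ADD_DEV");

	close(fd);
	return ret == 0 ? 0 : 1;
}

UBAGG_RMV_DEV and UBAGG_SET_TOPO_INFO follow the same header pattern with args_len set to sizeof(struct ubagg_rmv_dev) and sizeof(struct ubagg_set_topo_info) respectively; for the topology command, arg.in.topo points at an array of up to MAX_NODE_NUM struct ubagg_topo_info entries and arg.in.topo_num gives the count.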