From 3fb8b7d415394192b312b6b5746cff24e058d4c6 Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Fri, 31 Oct 2025 22:29:57 +0800 Subject: [PATCH 01/10] ub: ubase: add support of ubase driver for ub network drivers inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID41MP CVE: NA ------------------------------------------------------- UB is a new interconnection protocol, and ubase is the base driver for all network services. This patch adds support for the UBASE driver and also includes the headers and functions needed by ubase and by other drivers such as unic, udma, cdma, fwctl and pmu, serving as their common foundation. The UBASE driver is one of the base drivers for the UB network; it covers ubus device management, resource management, auxiliary device management, entity management, querying the specific capabilities of the device, and so on, allowing the drivers listed above (unic, udma, cdma, fwctl and pmu) to be loaded (insmod) on top of it and use its basic capabilities. At the same time, this patch supports compiling ubase.ko: the ubase Makefile and Kconfig are added, and the Makefile and Kconfig under drivers/ub are updated accordingly. 
Signed-off-by: Haiqing Fang Signed-off-by: Haibin Lu Signed-off-by: Yixi Shen Signed-off-by: Xiaobo Zhang Signed-off-by: Jianqiang Li Signed-off-by: Chuan Wu Signed-off-by: Zihao Sheng Signed-off-by: Junxin Chen Signed-off-by: Fengyan Mu Signed-off-by: Xiongchuan Zhou --- arch/arm64/configs/openeuler_defconfig | 4 + drivers/ub/Kconfig | 1 + drivers/ub/Makefile | 1 + drivers/ub/ubase/Kconfig | 15 + drivers/ub/ubase/Makefile | 16 + drivers/ub/ubase/debugfs/ubase_debugfs.c | 165 ++++++ drivers/ub/ubase/debugfs/ubase_debugfs.h | 28 + drivers/ub/ubase/ubase.h | 45 ++ drivers/ub/ubase/ubase_cmd.c | 87 +++ drivers/ub/ubase/ubase_cmd.h | 18 + drivers/ub/ubase/ubase_dev.c | 656 +++++++++++++++++++++++ drivers/ub/ubase/ubase_dev.h | 288 ++++++++++ drivers/ub/ubase/ubase_eq.c | 158 ++++++ drivers/ub/ubase/ubase_eq.h | 75 +++ drivers/ub/ubase/ubase_main.c | 39 ++ drivers/ub/ubase/ubase_reset.h | 14 + drivers/ub/ubase/ubase_ubus.c | 371 +++++++++++++ drivers/ub/ubase/ubase_ubus.h | 46 ++ include/ub/ubase/ubase_comm_cmd.h | 23 + include/ub/ubase/ubase_comm_debugfs.h | 40 ++ include/ub/ubase/ubase_comm_dev.h | 224 ++++++++ include/ub/ubase/ubase_comm_eq.h | 53 ++ include/ub/ubase/ubase_comm_hw.h | 53 ++ include/ub/ubase/ubase_comm_stats.h | 119 ++++ 24 files changed, 2539 insertions(+) create mode 100644 drivers/ub/ubase/Kconfig create mode 100644 drivers/ub/ubase/Makefile create mode 100644 drivers/ub/ubase/debugfs/ubase_debugfs.c create mode 100644 drivers/ub/ubase/debugfs/ubase_debugfs.h create mode 100644 drivers/ub/ubase/ubase.h create mode 100644 drivers/ub/ubase/ubase_cmd.c create mode 100644 drivers/ub/ubase/ubase_cmd.h create mode 100644 drivers/ub/ubase/ubase_dev.c create mode 100644 drivers/ub/ubase/ubase_dev.h create mode 100644 drivers/ub/ubase/ubase_eq.c create mode 100644 drivers/ub/ubase/ubase_eq.h create mode 100644 drivers/ub/ubase/ubase_main.c create mode 100644 drivers/ub/ubase/ubase_reset.h create mode 100644 drivers/ub/ubase/ubase_ubus.c create mode 100644 
drivers/ub/ubase/ubase_ubus.h create mode 100644 include/ub/ubase/ubase_comm_cmd.h create mode 100644 include/ub/ubase/ubase_comm_debugfs.h create mode 100644 include/ub/ubase/ubase_comm_dev.h create mode 100644 include/ub/ubase/ubase_comm_eq.h create mode 100644 include/ub/ubase/ubase_comm_hw.h create mode 100644 include/ub/ubase/ubase_comm_stats.h diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index e80fc0985a50..73b75763afd8 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -8332,4 +8332,8 @@ CONFIG_UB_UBUS_BUS=m CONFIG_UB_UBFI=m CONFIG_UB_UBUS_USI=y CONFIG_ARM_GIC_V3_ITS_UBUS=y + +# UB BASE driver +CONFIG_UB_UBASE=m + # end of unified bus diff --git a/drivers/ub/Kconfig b/drivers/ub/Kconfig index d876a10ca180..9755ce29deea 100644 --- a/drivers/ub/Kconfig +++ b/drivers/ub/Kconfig @@ -15,4 +15,5 @@ menuconfig UB if UB source "drivers/ub/ubus/Kconfig" source "drivers/ub/ubfi/Kconfig" +source "drivers/ub/ubase/Kconfig" endif # UB diff --git a/drivers/ub/Makefile b/drivers/ub/Makefile index 7860f8e74a47..72eead96aa95 100644 --- a/drivers/ub/Makefile +++ b/drivers/ub/Makefile @@ -2,3 +2,4 @@ obj-y += ubus/ obj-y += ubfi/ +obj-$(CONFIG_UB_UBASE) += ubase/ diff --git a/drivers/ub/ubase/Kconfig b/drivers/ub/ubase/Kconfig new file mode 100644 index 000000000000..c9137f193a47 --- /dev/null +++ b/drivers/ub/ubase/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# ubase configuration +# + +menuconfig UB_UBASE + default n + tristate "UBASE support" + depends on UB_UBUS_BUS && UB_UBUS_USI + depends on UB_UMMU_CORE_DRIVER + help + This option enables support for the ubase module of Unifiedbus, + including support for getting resource and create auxiliary bus + for upper Unifiedbus modules like unic, udma and cdma. + Say 'Y' here if you want to enable Unifiedbus upper module. 
diff --git a/drivers/ub/ubase/Makefile b/drivers/ub/ubase/Makefile new file mode 100644 index 000000000000..17069b9f624e --- /dev/null +++ b/drivers/ub/ubase/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +## Makefile for the Hisilicon ubase drivers. +# +## + +ccflags-y += -I$(srctree)/$(src) +ccflags-y += -I$(srctree)/drivers/ub/ubase/debugfs + +MODULE_NAME := ubase + +UBASE_OBJS := ubase_main.o ubase_dev.o ubase_cmd.o \ + debugfs/ubase_debugfs.o ubase_eq.o ubase_ubus.o + +$(MODULE_NAME)-objs := $(UBASE_OBJS) +obj-$(CONFIG_UB_UBASE) := ubase.o diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c new file mode 100644 index 000000000000..75bf029f17a5 --- /dev/null +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#include +#include +#include + +#include "ubase_dev.h" +#include "ubase_debugfs.h" + +static struct dentry *ubase_dbgfs_root; + +static bool __ubase_dbg_dentry_support(struct device *dev, u32 property) +{ + struct ubase_dev *udev = dev_get_drvdata(dev); + + if (((property & UBASE_SUP_UNIC) && ubase_dev_unic_supported(udev)) || + ((property & UBASE_SUP_UDMA) && ubase_dev_udma_supported(udev)) || + ((property & UBASE_SUP_CDMA) && ubase_dev_cdma_supported(udev)) || + ((property & UBASE_SUP_PMU) && ubase_dev_pmu_supported(udev))) { + if (((property & UBASE_SUP_UBL) && ubase_dev_ubl_supported(udev)) || + ((property & UBASE_SUP_ETH) && ubase_dev_eth_mac_supported(udev))) + return true; + } + + return false; +} + +static struct ubase_dbg_dentry_info ubase_dbg_dentry[] = { + /* ue debugfs top-level directory, + * "dev_name" refers to the ue name + */ + { + .name = "dev_name", + .property = UBASE_SUP_URMA | UBASE_SUP_CDMA | UBASE_SUP_PMU | + UBASE_SUP_UBL_ETH, + .support = __ubase_dbg_dentry_support, + }, +}; + +static struct ubase_dbg_cmd_info ubase_dbg_cmd[] 
= { +}; + +static int ubase_dbg_create_dir(struct device *dev, + struct ubase_dbg_dentry_info *dirs, u32 root_idx) +{ + u32 i; + + for (i = 0; i < root_idx; i++) { + if (!dirs[i].support(dev, dirs[i].property)) + continue; + + dirs[i].dentry = debugfs_create_dir(dirs[i].name, + dirs[root_idx].dentry); + if (IS_ERR(dirs[i].dentry)) { + dev_err(dev, "failed to create %s dir.\n", dirs[i].name); + return PTR_ERR(dirs[i].dentry); + } + } + + return 0; +} + +static int ubase_dbg_create_file(struct device *dev, struct ubase_dbgfs *dbgfs, + struct ubase_dbg_dentry_info *dirs) +{ + struct ubase_dbg_cmd_info *cmd_info; + int i, ret; + + cmd_info = dbgfs->cmd_info; + for (i = 0; i < dbgfs->cmd_info_size; i++) { + if (!cmd_info[i].support(dev, cmd_info[i].property)) + continue; + + ret = cmd_info[i].init(dev, dirs, dbgfs, i); + if (ret) { + dev_err(dev, "failed to init cmd %s, ret = %d.\n", + cmd_info[i].name, ret); + return ret; + } + } + + return 0; +} + +int ubase_dbg_create_dentry(struct device *dev, struct ubase_dbgfs *dbgfs, + struct ubase_dbg_dentry_info *dirs, u32 root_idx) +{ + int ret; + + if (!dev || !dbgfs || !dirs || !dbgfs->cmd_info) + return -EINVAL; + + ret = ubase_dbg_create_dir(dev, dirs, root_idx); + if (ret) { + dev_err(dev, + "failed to create ubase debugfs dirs, ret = %d.\n", ret); + return ret; + } + + ret = ubase_dbg_create_file(dev, dbgfs, dirs); + if (ret) + dev_err(dev, + "failed to create ubase debugfs files, ret = %d.\n", ret); + + return ret; +} +EXPORT_SYMBOL(ubase_dbg_create_dentry); + +int ubase_dbg_init(struct ubase_dev *udev) +{ + const char *name = dev_name(udev->dev); + struct device *dev = udev->dev; + int ret; + + udev->dbgfs.dentry = debugfs_create_dir(name, ubase_dbgfs_root); + if (IS_ERR(udev->dbgfs.dentry)) { + ubase_err(udev, "failed to create ubase debugfs root dir.\n"); + return PTR_ERR(udev->dbgfs.dentry); + } + + ubase_dbg_dentry[UBASE_DBG_DENTRY_ROOT].dentry = udev->dbgfs.dentry; + udev->dbgfs.cmd_info = ubase_dbg_cmd; + 
udev->dbgfs.cmd_info_size = ARRAY_SIZE(ubase_dbg_cmd); + + ret = ubase_dbg_create_dentry(dev, &udev->dbgfs, ubase_dbg_dentry, + ARRAY_SIZE(ubase_dbg_dentry) - 1); + if (ret) { + ubase_err(udev, + "failed to create ubase debugfs dentry, ret = %d.\n", + ret); + goto create_dentry_err; + } + + return 0; + +create_dentry_err: + debugfs_remove_recursive(udev->dbgfs.dentry); + + return ret; +} + +void ubase_dbg_uninit(struct ubase_dev *udev) +{ + debugfs_remove_recursive(udev->dbgfs.dentry); +} + +int ubase_dbg_register_debugfs(void) +{ +#define UBASE_DBGFS_ROOT "ubase" + + ubase_dbgfs_root = debugfs_create_dir(UBASE_DBGFS_ROOT, NULL); + if (IS_ERR(ubase_dbgfs_root)) + return PTR_ERR(ubase_dbgfs_root); + + return 0; +} + +void ubase_dbg_unregister_debugfs(void) +{ + debugfs_remove_recursive(ubase_dbgfs_root); +} diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.h b/drivers/ub/ubase/debugfs/ubase_debugfs.h new file mode 100644 index 000000000000..8b68b75d4cfc --- /dev/null +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#ifndef __UBASE_DEBUGFS_H__ +#define __UBASE_DEBUGFS_H__ + +#include +#include +#include + +#include "ubase_dev.h" + +enum ubase_dbg_dentry_type { + UBASE_DBG_DENTRY_CONTEXT = 0, + UBASE_DBG_DENTRY_QOS, + /* must be the last entry. */ + UBASE_DBG_DENTRY_ROOT, +}; + +int ubase_dbg_init(struct ubase_dev *udev); +void ubase_dbg_uninit(struct ubase_dev *udev); +int ubase_dbg_register_debugfs(void); +void ubase_dbg_unregister_debugfs(void); + +#endif diff --git a/drivers/ub/ubase/ubase.h b/drivers/ub/ubase/ubase.h new file mode 100644 index 000000000000..1f1a157d8a98 --- /dev/null +++ b/drivers/ub/ubase/ubase.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#ifndef __UBASE_H__ +#define __UBASE_H__ + +#include +#include + +#define UBASE_CAP_LEN 3 +#define UBASE_MAX_TCG_NUM (4) + +struct ubase_delay_work { + struct delayed_work service_task; + unsigned long state; +}; + +enum { + UBASE_SUPPORT_UBL_B = 0, + UBASE_SUPPORT_TA_EXTDB_BUF_B = 2, + UBASE_SUPPORT_TA_TIMER_BUF_B = 3, + UBASE_SUPPORT_ERR_HANDLE_B = 4, + UBASE_SUPPORT_CTRLQ_B = 5, + UBASE_SUPPORT_ETH_MAC_B = 6, + UBASE_SUPPORT_MAC_STATS_B = 10, + UBASE_SUPPORT_PRE_ALLOC_B = 13, + UBASE_SUPPORT_UDMA_DISABLE_B = 14, + UBASE_SUPPORT_UNIC_DISABLE_B = 15, + UBASE_SUPPORT_UVB_B = 16, + UBASE_SUPPORT_IP_OVER_URMA_B = 17, + UBASE_SUPPORT_IP_OVER_URMA_UTP_B = 18, + UBASE_SUPPORT_ACTIVATE_PROXY_B = 19, + UBASE_SUPPORT_UTP_B = 20, + + /* must be last entry and it should <= UBASE_CAP_LEN * 32 */ + UBASE_SUPPORT_MASK_NBITS +}; + +#define ubase_get_cap_bit(udev, nr) \ + test_bit(nr, (unsigned long *)((udev)->cap_bits)) + +#endif diff --git a/drivers/ub/ubase/ubase_cmd.c b/drivers/ub/ubase/ubase_cmd.c new file mode 100644 index 000000000000..ab47d8ae9021 --- /dev/null +++ b/drivers/ub/ubase/ubase_cmd.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#include + +#include "ubase_cmd.h" + +int __ubase_register_crq_event(struct ubase_dev *udev, + struct ubase_crq_event_nb *nb) +{ + struct ubase_crq_event_nbs *nbs, *tmp, *new_nbs; + struct ubase_crq_table *crq_table; + int ret; + + crq_table = &udev->crq_table; + mutex_lock(&crq_table->lock); + list_for_each_entry_safe(nbs, tmp, &crq_table->nbs.list, list) { + if (unlikely(nbs->nb.opcode == nb->opcode)) { + ret = -EEXIST; + goto err_crq_register; + } + } + + new_nbs = kzalloc(sizeof(*new_nbs), GFP_KERNEL); + if (!new_nbs) { + ret = -ENOMEM; + goto err_crq_register; + } + + new_nbs->nb = *nb; + list_add_tail(&new_nbs->list, &crq_table->nbs.list); + mutex_unlock(&crq_table->lock); + + return 0; + +err_crq_register: + mutex_unlock(&crq_table->lock); + + ubase_err(udev, "failed to register crq event, opcode = 0x%x, ret = %d.\n", + nb->opcode, ret); + return ret; +} + +int ubase_register_crq_event(struct auxiliary_device *aux_dev, + struct ubase_crq_event_nb *nb) +{ + struct ubase_dev *udev; + + if (!aux_dev || !nb || !nb->crq_handler) + return -EINVAL; + + udev = __ubase_get_udev_by_adev(aux_dev); + return __ubase_register_crq_event(udev, nb); +} +EXPORT_SYMBOL(ubase_register_crq_event); + +void __ubase_unregister_crq_event(struct ubase_dev *udev, u16 opcode) +{ + struct ubase_crq_event_nbs *nbs, *tmp; + struct ubase_crq_table *crq_table; + + crq_table = &udev->crq_table; + mutex_lock(&crq_table->lock); + list_for_each_entry_safe(nbs, tmp, &crq_table->nbs.list, list) { + if (nbs->nb.opcode == opcode) { + list_del(&nbs->list); + kfree(nbs); + break; + } + } + mutex_unlock(&crq_table->lock); +} + +void ubase_unregister_crq_event(struct auxiliary_device *aux_dev, u16 opcode) +{ + struct ubase_dev *udev; + + if (!aux_dev) + return; + + udev = __ubase_get_udev_by_adev(aux_dev); + __ubase_unregister_crq_event(udev, opcode); +} +EXPORT_SYMBOL(ubase_unregister_crq_event); diff --git a/drivers/ub/ubase/ubase_cmd.h b/drivers/ub/ubase/ubase_cmd.h new file mode 100644 
index 000000000000..0f1d92ebe511 --- /dev/null +++ b/drivers/ub/ubase/ubase_cmd.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#ifndef __UBASE_CMD_H__ +#define __UBASE_CMD_H__ + +#include + +#include "ubase_dev.h" + +int __ubase_register_crq_event(struct ubase_dev *udev, + struct ubase_crq_event_nb *nb); +void __ubase_unregister_crq_event(struct ubase_dev *udev, u16 opcode); + +#endif diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c new file mode 100644 index 000000000000..be052e9cad61 --- /dev/null +++ b/drivers/ub/ubase/ubase_dev.c @@ -0,0 +1,656 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#include +#include + +#include "debugfs/ubase_debugfs.h" +#include "ubase_cmd.h" +#include "ubase_dev.h" + +#define UBASE_PERIOD_100MS 100 + +static int ubase_debug; +module_param_named(debug, ubase_debug, int, 0644); +MODULE_PARM_DESC(debug, "enable ubase debug log, 0:disable, others:enable, default:0"); + +static DEFINE_IDA(ubase_adev_ida); + +bool ubase_dev_urma_supported(struct ubase_dev *udev) +{ + struct ub_entity *ue = container_of(udev->dev, struct ub_entity, dev); + + switch (uent_device(ue)) { + case UBASE_DEV_ID_K_0_URMA_MUE: + case UBASE_DEV_ID_K_0_URMA_UE: + case UBASE_DEV_ID_A_0_URMA_MUE: + case UBASE_DEV_ID_A_0_URMA_UE: + case UBASE_DEV_ID_A_0_UBOE_MUE: + case UBASE_DEV_ID_A_0_UBOE_UE: + break; + default: + return false; + } + + return true; +} + +bool ubase_dev_unic_supported(struct ubase_dev *udev) +{ + struct ub_entity *ue = container_of(udev->dev, struct ub_entity, dev); + + switch (uent_device(ue)) { + case UBASE_DEV_ID_K_0_URMA_MUE: + case UBASE_DEV_ID_A_0_URMA_MUE: + case UBASE_DEV_ID_A_0_UBOE_MUE: + break; + default: + return false; + } + + return !ubase_get_cap_bit(udev, UBASE_SUPPORT_UNIC_DISABLE_B); +} + +bool 
ubase_dev_cdma_supported(struct ubase_dev *udev) +{ + struct ub_entity *ue = container_of(udev->dev, struct ub_entity, dev); + + switch (uent_device(ue)) { + case UBASE_DEV_ID_K_0_CDMA_MUE: + case UBASE_DEV_ID_K_0_CDMA_UE: + case UBASE_DEV_ID_A_0_CDMA_MUE: + case UBASE_DEV_ID_A_0_CDMA_UE: + break; + default: + return false; + } + + return true; +} + +bool ubase_dev_pmu_supported(struct ubase_dev *udev) +{ + struct ub_entity *ue = container_of(udev->dev, struct ub_entity, dev); + + switch (uent_device(ue)) { + case UBASE_DEV_ID_K_0_PMU_MUE: + case UBASE_DEV_ID_K_0_PMU_UE: + case UBASE_DEV_ID_A_0_PMU_MUE: + case UBASE_DEV_ID_A_0_PMU_UE: + break; + default: + return false; + } + + return true; +} + +bool ubase_dev_fwctl_supported(struct ubase_dev *udev) +{ + return ubase_dev_pmu_supported(udev); +} + +static struct ubase_adev_device { + const char *suffix; + bool (*is_supported)(struct ubase_dev *dev); +} ubase_adev_devices[UBASE_DRV_MAX] = { + [UBASE_DRV_UNIC] = { + .suffix = "unic", + .is_supported = &ubase_dev_unic_supported + }, + [UBASE_DRV_UDMA] = { + .suffix = "udma", + .is_supported = &ubase_dev_udma_supported + }, + [UBASE_DRV_CDMA] = { + .suffix = "cdma", + .is_supported = &ubase_dev_cdma_supported + }, + [UBASE_DRV_FWCTL] = { + .suffix = "fwctl", + .is_supported = &ubase_dev_fwctl_supported + }, + [UBASE_DRV_PMU] = { + .suffix = "pmu", + .is_supported = &ubase_dev_pmu_supported + }, + [UBASE_DRV_UVB] = { + .suffix = "uvb", + .is_supported = &ubase_dev_uvb_supported + }, +}; + +int ubase_adev_idx_alloc(void) +{ + return ida_alloc(&ubase_adev_ida, GFP_KERNEL); +} + +void ubase_adev_idx_free(int id) +{ + ida_free(&ubase_adev_ida, id); +} + +static void ubase_port_handler(struct ubase_dev *udev, bool link_up) +{ + struct ubase_adev *uadev; + int i; + + if (!test_bit(UBASE_STATE_INITED_B, &udev->state_bits)) + return; + + mutex_lock(&udev->priv.uadev_lock); + for (i = 0; i < UBASE_DRV_MAX; i++) { + uadev = udev->priv.uadev[i]; + if (!uadev) + continue; + + 
mutex_lock(&uadev->port_lock); + if (uadev->port_handler) + uadev->port_handler(&uadev->adev, link_up); + mutex_unlock(&uadev->port_lock); + } + mutex_unlock(&udev->priv.uadev_lock); +} + +void ubase_port_down(struct ubase_dev *udev) +{ + ubase_port_handler(udev, 0); +} + +void ubase_port_up(struct ubase_dev *udev) +{ + ubase_port_handler(udev, 1); +} + +static void ubase_comm_adev_release(struct device *dev) +{ + struct ubase_adev *ubase_adev = + container_of(dev, struct ubase_adev, adev.dev); + + kfree(ubase_adev); +} + +static struct ubase_adev *ubase_add_one_adev(struct ubase_dev *udev, int idx) +{ + struct ubase_adev *uadev; + int ret; + + uadev = kzalloc(sizeof(struct ubase_adev), GFP_KERNEL); + if (!uadev) { + ubase_err(udev, "failed to alloc auxiliary device(%s.%d).\n", + ubase_adev_devices[idx].suffix, udev->dev_id); + return ERR_PTR(-ENOMEM); + } + + uadev->adev.name = ubase_adev_devices[idx].suffix; + uadev->adev.id = (u32)udev->dev_id; + uadev->adev.dev.parent = udev->dev; + uadev->adev.dev.release = ubase_comm_adev_release; + uadev->idx = idx; + uadev->udev = udev; + ATOMIC_INIT_NOTIFIER_HEAD(&uadev->comp_nh); + + ret = auxiliary_device_init(&uadev->adev); + if (ret) { + kfree(uadev); + ubase_err(udev, + "failed to init auxiliary device(%s.%d), ret = %d\n", + ubase_adev_devices[idx].suffix, udev->dev_id, ret); + return ERR_PTR(ret); + } + + ret = auxiliary_device_add(&uadev->adev); + if (ret) { + auxiliary_device_uninit(&uadev->adev); + ubase_err(udev, + "failed to add auxiliary device(%s.%d), ret = %d\n", + ubase_adev_devices[idx].suffix, udev->dev_id, ret); + return ERR_PTR(ret); + } + + mutex_init(&uadev->virt_lock); + mutex_init(&uadev->port_lock); + mutex_init(&uadev->reset_lock); + mutex_init(&uadev->activate_lock); + + return uadev; +} + +static void ubase_del_one_adev(struct ubase_dev *udev, int idx) +{ + struct ubase_priv *priv = &udev->priv; + struct ubase_adev *uadev; + + uadev = priv->uadev[idx]; + + mutex_destroy(&uadev->activate_lock); + 
mutex_destroy(&uadev->reset_lock); + mutex_destroy(&uadev->port_lock); + mutex_destroy(&uadev->virt_lock); + auxiliary_device_delete(&uadev->adev); + auxiliary_device_uninit(&uadev->adev); + priv->uadev[idx] = NULL; +} + +static int ubase_init_aux_devices(struct ubase_dev *udev) +{ + struct ubase_priv *priv = &udev->priv; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(ubase_adev_devices); i++) { + if (priv->uadev[i]) + continue; + + if (!ubase_adev_devices[i].is_supported || + !ubase_adev_devices[i].is_supported(udev)) + continue; + + priv->uadev[i] = ubase_add_one_adev(udev, i); + if (IS_ERR(priv->uadev[i])) { + ret = PTR_ERR(priv->uadev[i]); + priv->uadev[i] = NULL; + ubase_err(udev, + "failed to load auxiliary device(%s.%d)\n", + ubase_adev_devices[i].suffix, udev->dev_id); + goto err_add_aux_dev; + } + } + + mutex_init(&udev->priv.uadev_lock); + + return 0; + +err_add_aux_dev: + for (; i >= 0; i--) { + if (!priv->uadev[i]) + continue; + + ubase_del_one_adev(udev, i); + } + + return ret; +} + +static void ubase_uninit_aux_devices(struct ubase_dev *udev) +{ + struct ubase_priv *priv = &udev->priv; + int i; + + mutex_lock(&priv->uadev_lock); + for (i = ARRAY_SIZE(ubase_adev_devices) - 1; i >= 0; i--) { + if (!priv->uadev[i]) + continue; + + ubase_del_one_adev(udev, i); + } + mutex_unlock(&priv->uadev_lock); + + mutex_destroy(&udev->priv.uadev_lock); +} + +static void ubase_cancel_period_service_task(struct ubase_dev *udev) +{ + if (udev->period_service_task.service_task.work.func) + cancel_delayed_work_sync(&udev->period_service_task.service_task); +} + +static int ubase_enable_period_service_task(struct ubase_dev *udev) +{ + struct ubase_delay_work *period_work = &udev->period_service_task; + unsigned long delta; + + delta = round_jiffies_relative(msecs_to_jiffies(UBASE_PERIOD_100MS)); + mod_delayed_work(udev->ubase_period_wq, + &period_work->service_task, + delta); + + return 0; +} + +static void ubase_period_service_task(struct work_struct *work) +{ +#define 
UBASE_STATS_TIMER_INTERVAL (300000 / (UBASE_PERIOD_100MS)) +#define UBASE_QUERY_SL_TIMER_INTERVAL (1000 / (UBASE_PERIOD_100MS)) + + struct ubase_delay_work *ubase_work = + container_of(work, struct ubase_delay_work, service_task.work); + struct ubase_dev *udev = container_of(ubase_work, struct ubase_dev, + period_service_task); + + if (test_bit(UBASE_STATE_DISABLED_B, &udev->state_bits)) { + ubase_enable_period_service_task(udev); + return; + } + + udev->serv_proc_cnt++; + ubase_enable_period_service_task(udev); +} + +static void ubase_init_delayed_work(struct ubase_dev *udev) +{ + INIT_DELAYED_WORK(&udev->period_service_task.service_task, + ubase_period_service_task); +} + +static int ubase_wq_init(struct ubase_dev *udev) +{ +#define UBASE_ALLOC_WQ(name) alloc_workqueue("%s", WQ_UNBOUND, 0, name) + + udev->ubase_wq = UBASE_ALLOC_WQ("ubase"); + if (!udev->ubase_wq) { + ubase_err(udev, "failed to alloc ubase workqueue.\n"); + goto err_alloc_ubase_wq; + } + + udev->ubase_async_wq = UBASE_ALLOC_WQ("ubase_async_service"); + if (!udev->ubase_async_wq) { + ubase_err(udev, "failed to alloc ubase async workqueue.\n"); + goto err_alloc_ubase_async_wq; + } + + udev->ubase_reset_wq = UBASE_ALLOC_WQ("ubase_reset_service"); + if (!udev->ubase_reset_wq) { + ubase_err(udev, "failed to alloc ubase reset workqueue.\n"); + goto err_alloc_ubase_reset_wq; + } + + udev->ubase_period_wq = UBASE_ALLOC_WQ("ubase_period_service"); + if (!udev->ubase_period_wq) { + ubase_err(udev, "failed to alloc ubase period workqueue.\n"); + goto err_alloc_ubase_period_wq; + } + + udev->ubase_arq_wq = UBASE_ALLOC_WQ("ubase_arq_service"); + if (!udev->ubase_arq_wq) { + ubase_err(udev, "failed to alloc ubase arq workqueue.\n"); + goto err_alloc_ubase_arq_wq; + } + + ubase_init_delayed_work(udev); + return 0; + +err_alloc_ubase_arq_wq: + destroy_workqueue(udev->ubase_period_wq); +err_alloc_ubase_period_wq: + destroy_workqueue(udev->ubase_reset_wq); +err_alloc_ubase_reset_wq: + 
destroy_workqueue(udev->ubase_async_wq); +err_alloc_ubase_async_wq: + destroy_workqueue(udev->ubase_wq); +err_alloc_ubase_wq: + return -ENOMEM; +} + +static void ubase_wq_uninit(struct ubase_dev *udev) +{ + destroy_workqueue(udev->ubase_arq_wq); + destroy_workqueue(udev->ubase_period_wq); + destroy_workqueue(udev->ubase_reset_wq); + destroy_workqueue(udev->ubase_async_wq); + destroy_workqueue(udev->ubase_wq); +} + +static struct ubase_crq_event_nb ubase_crq_events[] = { +}; + +static void ubase_unregister_cmdq_crq_event(struct ubase_dev *udev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ubase_crq_events); i++) + __ubase_unregister_crq_event(udev, ubase_crq_events[i].opcode); +} + +static int ubase_register_cmdq_crq_event(struct ubase_dev *udev) +{ + int ret, i; + + for (i = 0; i < ARRAY_SIZE(ubase_crq_events); i++) { + ubase_crq_events[i].back = udev; + + ret = __ubase_register_crq_event(udev, &ubase_crq_events[i]); + if (ret) { + ubase_err(udev, + "failed to register crq event[%d], ret = %d.\n", + i, ret); + goto err_reg_event; + } + } + + return 0; + +err_reg_event: + for (i = i - 1; i >= 0; i--) + __ubase_unregister_crq_event(udev, ubase_crq_events[i].opcode); + return ret; +} + +static const struct ubase_init_function ubase_init_func_map[] = { + { + "init work queue", UBASE_SUP_ALL, 0, + ubase_wq_init, ubase_wq_uninit + }, + { + "init cmd queue", UBASE_SUP_ALL, 1, + NULL, NULL + }, + { + "query dev res", UBASE_SUP_ALL, 0, + NULL, NULL + }, + { + "init mailbox", UBASE_SUP_NO_PMU, 0, + NULL, NULL + }, + { + "query chip info", UBASE_SUP_ALL, 0, + NULL, NULL + }, + { + "query controller_info", UBASE_SUP_NO_PMU, 0, + NULL, NULL + }, + { + "query hw oor caps", UBASE_SUP_NO_PMU, 0, + NULL, NULL + }, + { + "init irq table", UBASE_SUP_NO_PMU, 1, + NULL, NULL + }, + { + "init ctrl queue", UBASE_SUP_NO_PMU, 1, + NULL, NULL + }, + { + "register aeq event", UBASE_SUP_NO_PMU, 0, + ubase_register_ae_event, ubase_unregister_ae_event + }, + { + "register cmdq crq event", 
UBASE_SUP_NO_PMU, 0, + ubase_register_cmdq_crq_event, ubase_unregister_cmdq_crq_event + }, + { + "register ctrlq crq event", UBASE_SUP_NO_PMU, 0, + NULL, NULL + }, + { + "init qos", UBASE_SUP_ALL, 0, + NULL, NULL + }, + { + "prealloc memory", UBASE_SUP_UDMA, 1, + NULL, NULL + }, + { + "init ue", UBASE_SUP_NO_PMU, 0, + NULL, NULL + }, + { + "init hw", UBASE_SUP_NO_PMU, 1, + NULL, NULL + }, + { + "init debugfs", UBASE_SUP_ALL, 0, + ubase_dbg_init, ubase_dbg_uninit + }, + { + "init auxiliary devices", UBASE_SUP_ALL, 0, + ubase_init_aux_devices, ubase_uninit_aux_devices + }, + { + "enable period service task", UBASE_SUP_NO_PMU, 0, + ubase_enable_period_service_task, ubase_cancel_period_service_task + }, + { + "enable ce irq", UBASE_SUP_NO_PMU, 1, + NULL, NULL + }, +}; + +static bool ubase_init_func_support(struct ubase_dev *udev, u32 support) +{ + return (((support & UBASE_SUP_UNIC) && ubase_dev_unic_supported(udev)) || + ((support & UBASE_SUP_UDMA) && ubase_dev_udma_supported(udev)) || + ((support & UBASE_SUP_CDMA) && ubase_dev_cdma_supported(udev)) || + ((support & UBASE_SUP_PMU) && ubase_dev_pmu_supported(udev))); +} + +int ubase_dev_init(struct ubase_dev *udev) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(ubase_init_func_map); i++) { + if (!ubase_init_func_support(udev, + ubase_init_func_map[i].support_devs)) + continue; + + if (ubase_init_func_map[i].init_func) { + ret = ubase_init_func_map[i].init_func(udev); + if (ret) { + ubase_err(udev, "failed to %s, ret = %d.\n", + ubase_init_func_map[i].err_msg, ret); + goto err_init; + } + } + } + + set_bit(UBASE_STATE_INITED_B, &udev->state_bits); + + return 0; + +err_init: + for (i -= 1; i >= 0; i--) { + if (!ubase_init_func_support(udev, + ubase_init_func_map[i].support_devs)) + continue; + + if (ubase_init_func_map[i].uninit_func) + ubase_init_func_map[i].uninit_func(udev); + } + + return ret; +} + +void ubase_dev_uninit(struct ubase_dev *udev) +{ + int i; + + if (udev->service_task.service_task.work.func) + 
cancel_delayed_work_sync(&udev->service_task.service_task); + flush_workqueue(udev->ubase_async_wq); + + for (i = ARRAY_SIZE(ubase_init_func_map) - 1; i >= 0; i--) { + if (!ubase_init_func_support(udev, + ubase_init_func_map[i].support_devs)) + continue; + + if (ubase_init_func_map[i].uninit_func) + ubase_init_func_map[i].uninit_func(udev); + } +} + +bool ubase_adev_ubl_supported(struct auxiliary_device *adev) +{ + if (!adev) + return false; + + return ubase_dev_ubl_supported(__ubase_get_udev_by_adev(adev)); +} +EXPORT_SYMBOL(ubase_adev_ubl_supported); + +bool ubase_adev_eth_mac_supported(struct auxiliary_device *adev) +{ + if (!adev) + return false; + + return ubase_dev_eth_mac_supported(__ubase_get_udev_by_adev(adev)); +} +EXPORT_SYMBOL(ubase_adev_eth_mac_supported); + +struct ubase_resource_space *ubase_get_io_base(struct auxiliary_device *adev) +{ + if (!adev) + return NULL; + + return &__ubase_get_udev_by_adev(adev)->hw.io_base; +} +EXPORT_SYMBOL(ubase_get_io_base); + +struct ubase_resource_space *ubase_get_mem_base(struct auxiliary_device *adev) +{ + if (!adev) + return NULL; + + return &__ubase_get_udev_by_adev(adev)->hw.mem_base; +} +EXPORT_SYMBOL(ubase_get_mem_base); + +struct ubase_caps *ubase_get_dev_caps(struct auxiliary_device *adev) +{ + if (!adev) + return NULL; + + return &__ubase_get_udev_by_adev(adev)->caps.dev_caps; +} +EXPORT_SYMBOL(ubase_get_dev_caps); + +struct ubase_adev_caps *ubase_get_udma_caps(struct auxiliary_device *adev) +{ + struct ubase_dev *udev; + + if (!adev) + return NULL; + + udev = __ubase_get_udev_by_adev(adev); + + return &udev->caps.udma_caps; +} +EXPORT_SYMBOL(ubase_get_udma_caps); + +struct ubase_adev_caps *ubase_get_cdma_caps(struct auxiliary_device *adev) +{ + return ubase_get_udma_caps(adev); +} +EXPORT_SYMBOL(ubase_get_cdma_caps); + +struct ubase_adev_caps *ubase_get_unic_caps(struct auxiliary_device *adev) +{ + struct ubase_dev *udev; + + if (!adev) + return NULL; + + udev = __ubase_get_udev_by_adev(adev); + + return 
&udev->caps.unic_caps; +} +EXPORT_SYMBOL(ubase_get_unic_caps); + +bool ubase_dbg_default(void) +{ + return ubase_debug; +} diff --git a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h new file mode 100644 index 000000000000..fe587046b291 --- /dev/null +++ b/drivers/ub/ubase/ubase_dev.h @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#ifndef __UBASE_DEV_H__ +#define __UBASE_DEV_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ubase.h" +#include "ubase_eq.h" +#include "ubase_ubus.h" + +#define UBASE_MOD_VERSION "1.0" + +#define ubase_dbg(_udev, fmt, ...) do { \ + if (ubase_dbg_default()) \ + dev_info(_udev->dev, "(pid %d) " fmt, \ + current->pid, ##__VA_ARGS__); \ + } while (0) + +#define ubase_err(_udev, fmt, ...) \ + dev_err(_udev->dev, "(pid %d) " fmt, \ + current->pid, ##__VA_ARGS__) + +#define ubase_info(_udev, fmt, ...) \ + dev_info(_udev->dev, "(pid %d) " fmt, \ + current->pid, ##__VA_ARGS__) + +#define ubase_warn(_udev, fmt, ...) 
\ + dev_warn(_udev->dev, "(pid %d) " fmt, \ + current->pid, ##__VA_ARGS__) + +struct ubase_adev { + struct auxiliary_device adev; + struct ubase_dev *udev; + struct atomic_notifier_head comp_nh; + struct notifier_block comp_notifier; + int idx; + + struct mutex virt_lock; + void (*virt_handler)(struct auxiliary_device *adev, u16 bus_ue_id, + bool is_en); + struct mutex port_lock; + void (*port_handler)(struct auxiliary_device *adev, bool link_up); + struct mutex reset_lock; + void (*reset_handler)(struct auxiliary_device *adev, + enum ubase_reset_stage stage); + struct mutex activate_lock; + void (*activate_handler)(struct auxiliary_device *adev, bool activate); +}; + +struct ubase_priv { + struct ubase_adev *uadev[UBASE_DRV_MAX]; + struct mutex uadev_lock; /* protect uadev[] */ +}; + +struct ubase_dev_caps { + struct ubase_adev_caps udma_caps; + struct ubase_adev_caps unic_caps; + struct ubase_caps dev_caps; + struct ubase_ue_caps ue_caps; +}; + +struct ubase_mbox_cmd { + struct dma_pool *pool; + struct semaphore sem; + struct ubase_mbx_event_context ctx; +}; + +struct ubase_dma_buf { + void *addr; + dma_addr_t dma_addr; + size_t size; +}; + +struct ubase_ta_layer_ctx { + struct ubase_dma_buf extdb_buf; + struct ubase_dma_buf timer_buf; +}; + +struct ubase_tp_layer_ctx { + spinlock_t tpg_lock; + struct ubase_tpg *tpg; +}; + +struct ubase_reset_stat { + u32 reset_done_cnt; + u32 hw_reset_done_cnt; + u32 elr_reset_cnt; + u32 reset_fail_cnt; + u32 reset_retry_cnt; + u32 port_reset_cnt; + u32 himac_reset_cnt; +}; + +enum ubase_dev_state_bit { + UBASE_STATE_INITED_B, + UBASE_STATE_DISABLED_B, + UBASE_STATE_RST_HANDLING_B, + UBASE_STATE_IRQ_INVALID_B, + UBASE_STATE_PORT_RESETTING_B, + UBASE_STATE_HIMAC_RESETTING_B, + UBASE_STATE_CTX_READY_B, + UBASE_STATE_PREALLOC_OK_B, +}; + +struct ubase_crq_event_nbs { + struct list_head list; + struct ubase_crq_event_nb nb; +}; + +struct ubase_crq_table { + struct mutex lock; + unsigned long last_crq_scheduled; + struct 
ubase_crq_event_nbs nbs; +}; + +#define UBASE_ACT_STAT_MAX_NUM 10U +struct ubase_activate_dev_stats { + u64 act_cnt; + u64 deact_cnt; + struct { + bool activate; + int result; + time64_t time; + } stats[UBASE_ACT_STAT_MAX_NUM]; + struct mutex lock; +}; + +struct ubase_stats { + struct mutex stats_lock; + struct ubase_eth_mac_stats eth_stats; + struct ubase_activate_dev_stats activate_record; +}; + +struct ubase_act_info { + u16 wait_msn; + int result; + struct completion activate_done; +}; + +struct ubase_act_ctx { + u16 msn; + struct ubase_act_info self; + struct ubase_act_info other; + struct mutex lock; +}; + +struct ubase_arq_msg { + u16 opcode; + void *data; + u32 data_len; +}; + +#define MAX_ARQ_MSG_NUM 128 +struct ubase_arq_msg_ring { + u8 pi; + u8 ci; + atomic_t count; + struct ubase_arq_msg msg[MAX_ARQ_MSG_NUM]; +}; + +struct ubase_pmem_ctx { + u16 page_cnt; + struct page **pgs; + struct scatterlist *sg; + dma_addr_t dma_addr; +}; + +struct ubase_prealloc_mem_info { + struct ubase_pmem_ctx comm; + struct ubase_pmem_ctx udma; +}; + +struct ubase_dev { + struct device *dev; + int dev_id; + struct ubase_priv priv; + struct ubase_hw hw; + + struct ubase_dev_caps caps; + struct ubase_adev_qos qos; + struct ubase_dbgfs dbgfs; + struct ubase_ctx_buf ctx_buf; + struct ubase_ta_layer_ctx ta_ctx; + struct ubase_tp_layer_ctx tp_ctx; + u32 cap_bits[UBASE_CAP_LEN]; + struct ubase_irq_table irq_table; + struct ubase_mbox_cmd mb_cmd; + struct workqueue_struct *ubase_wq; + struct workqueue_struct *ubase_async_wq; + struct workqueue_struct *ubase_reset_wq; + struct workqueue_struct *ubase_period_wq; + struct workqueue_struct *ubase_arq_wq; + unsigned long serv_proc_cnt; + struct ubase_delay_work service_task; + struct ubase_delay_work reset_service_task; + struct ubase_delay_work period_service_task; + struct ubase_delay_work arq_service_task; + struct ubase_crq_table crq_table; + unsigned long state_bits; + struct list_head ue_list; + struct mutex ue_list_lock; + + struct 
ubase_reset_stat reset_stat; + enum ubase_reset_type reset_type; + unsigned long last_reset_scheduled; + enum ubase_reset_stage reset_stage; + struct ubase_stats stats; + struct ubase_act_ctx act_ctx; + struct ubase_arq_msg_ring arq; + struct ubase_prealloc_mem_info pmem_info; +}; + +#define UBASE_ERR_MSG_LEN 128 +struct ubase_init_function { + char err_msg[UBASE_ERR_MSG_LEN]; + u32 support_devs; + u8 need_reset; + int (*init_func)(struct ubase_dev *udev); + void (*uninit_func)(struct ubase_dev *udev); +}; + +bool ubase_dbg_default(void); +bool ubase_dev_urma_supported(struct ubase_dev *udev); +bool ubase_dev_unic_supported(struct ubase_dev *udev); +bool ubase_dev_cdma_supported(struct ubase_dev *udev); +bool ubase_dev_pmu_supported(struct ubase_dev *udev); +bool ubase_dev_fwctl_supported(struct ubase_dev *udev); + +static inline +struct ubase_dev *__ubase_get_udev_by_adev(struct auxiliary_device *adev) +{ + struct ubase_adev *uadev = container_of(adev, struct ubase_adev, adev); + + return uadev->udev; +} + +static inline bool ubase_dev_uvb_supported(struct ubase_dev *udev) +{ + return ubase_get_cap_bit(udev, UBASE_SUPPORT_UVB_B); +} + +static inline +struct ubase_dev *ubase_get_udev_by_adev(struct auxiliary_device *adev) +{ + if (!adev) + return NULL; + + return __ubase_get_udev_by_adev(adev); +} + +static inline bool ubase_dev_udma_supported(struct ubase_dev *udev) +{ + return ubase_dev_urma_supported(udev) && + !ubase_get_cap_bit(udev, UBASE_SUPPORT_UDMA_DISABLE_B); +} + +static inline bool ubase_dev_ubl_supported(struct ubase_dev *udev) +{ + return ubase_get_cap_bit(udev, UBASE_SUPPORT_UBL_B); +} + +static inline bool ubase_dev_eth_mac_supported(struct ubase_dev *udev) +{ + return ubase_get_cap_bit(udev, UBASE_SUPPORT_ETH_MAC_B); +} + +int ubase_adev_idx_alloc(void); +void ubase_adev_idx_free(int id); + +void ubase_port_down(struct ubase_dev *udev); +void ubase_port_up(struct ubase_dev *udev); + +int ubase_dev_init(struct ubase_dev *udev); +void 
ubase_dev_uninit(struct ubase_dev *udev); + +#endif diff --git a/drivers/ub/ubase/ubase_eq.c b/drivers/ub/ubase/ubase_eq.c new file mode 100644 index 000000000000..98e4bfe69ef6 --- /dev/null +++ b/drivers/ub/ubase/ubase_eq.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#include + +#include "ubase_dev.h" +#include "ubase_eq.h" + +static int __ubase_event_register(struct ubase_dev *udev, + struct ubase_event_nb *cb) +{ + struct blocking_notifier_head *nh; + int ret; + + if (cb->drv_type >= UBASE_DRV_MAX) { + ubase_err(udev, "unsupported drv_type(%u).\n", cb->drv_type); + return -EINVAL; + } + + if (cb->event_type >= UBASE_EVENT_TYPE_MAX) { + ubase_err(udev, + "unsupported event type(%u).\n", cb->event_type); + return -EINVAL; + } + + nh = udev->irq_table.nh[cb->drv_type]; + ret = blocking_notifier_chain_register(&nh[cb->event_type], &cb->nb); + if (ret) + ubase_err(udev, + "failed to notifier chain register, type = %u, ret = %d.\n", + cb->event_type, ret); + + return ret; +} + +int ubase_event_register(struct auxiliary_device *adev, + struct ubase_event_nb *cb) +{ + if (!adev || !cb) + return -EINVAL; + + return __ubase_event_register(__ubase_get_udev_by_adev(adev), cb); +} +EXPORT_SYMBOL(ubase_event_register); + +static void __ubase_event_unregister(struct ubase_dev *udev, + struct ubase_event_nb *cb) +{ + struct blocking_notifier_head *nh; + int ret; + + if (cb->drv_type >= UBASE_DRV_MAX) { + ubase_err(udev, "unsupported drv_type(%u).\n", cb->drv_type); + return; + } + + if (cb->event_type >= UBASE_EVENT_TYPE_MAX) { + ubase_err(udev, + "unsupported event type(%u).\n", cb->event_type); + return; + } + + nh = udev->irq_table.nh[cb->drv_type]; + ret = blocking_notifier_chain_unregister(&nh[cb->event_type], &cb->nb); + if (ret) + ubase_err(udev, + "failed to unregister notifier chain, type = %u, ret = %d.\n", + cb->event_type, ret); +} + +void 
ubase_event_unregister(struct auxiliary_device *adev, + struct ubase_event_nb *cb) +{ + if (!adev || !cb) + return; + + __ubase_event_unregister(__ubase_get_udev_by_adev(adev), cb); +} +EXPORT_SYMBOL(ubase_event_unregister); + +int ubase_comp_register(struct auxiliary_device *adev, + int (*comp_handler)(struct notifier_block *nb, + unsigned long jfcn, void *data)) +{ + struct ubase_adev *uadev; + int ret; + + if (!adev || !comp_handler) + return -EINVAL; + + uadev = container_of(adev, struct ubase_adev, adev); + uadev->comp_notifier.notifier_call = comp_handler; + ret = atomic_notifier_chain_register(&uadev->comp_nh, + &uadev->comp_notifier); + if (ret) + ubase_err(uadev->udev, + "failed to register comp notifier chain, ret = %d.\n", + ret); + + return ret; +} +EXPORT_SYMBOL(ubase_comp_register); + +void ubase_comp_unregister(struct auxiliary_device *adev) +{ + struct ubase_adev *uadev; + int ret; + + if (!adev) + return; + + uadev = container_of(adev, struct ubase_adev, adev); + + ret = atomic_notifier_chain_unregister(&uadev->comp_nh, + &uadev->comp_notifier); + if (ret) + ubase_err(uadev->udev, + "failed to unregister comp notifier chain, ret = %d.\n", + ret); +} +EXPORT_SYMBOL(ubase_comp_unregister); + +static void __ubase_unregister_ae_event(struct ubase_dev *udev, int num) +{ + int i; + + for (i = 0; i < num; i++) + __ubase_event_unregister(udev, &udev->irq_table.aeq.cb[i]); +} + +void ubase_unregister_ae_event(struct ubase_dev *udev) +{ + __ubase_unregister_ae_event(udev, UBASE_AE_LEVEL_NUM); +} + +int ubase_register_ae_event(struct ubase_dev *udev) +{ + struct ubase_event_nb ubase_ae_nbs[UBASE_AE_LEVEL_NUM] = {}; + struct ubase_aeq *aeq = &udev->irq_table.aeq; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(ubase_ae_nbs); i++) { + aeq->cb[i] = ubase_ae_nbs[i]; + ret = __ubase_event_register(udev, &aeq->cb[i]); + if (ret) { + ubase_err(udev, + "failed to register asyn event[%u], ret = %d", + aeq->cb[i].event_type, ret); + __ubase_unregister_ae_event(udev, i); + 
return ret; + } + } + + return 0; +} diff --git a/drivers/ub/ubase/ubase_eq.h b/drivers/ub/ubase/ubase_eq.h new file mode 100644 index 000000000000..5602e166a82d --- /dev/null +++ b/drivers/ub/ubase/ubase_eq.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#ifndef __UBASE_EQ_H__ +#define __UBASE_EQ_H__ + +#include + +#include "ubase.h" + +#define UBASE_MIN_IRQ_NUM 3 /* for misc aeq ceq */ + +#define UBASE_INT_NAME_LEN 32 + +#define UBASE_AE_LEVEL_NUM 4 + +struct ubase_irq { + char name[UBASE_INT_NAME_LEN]; + int irqn; +}; + +struct ubase_eq_addr { + void *addr; + dma_addr_t dma_addr; + size_t size; +}; + +struct ubase_eq { + void __iomem *db_reg; + struct ubase_eq_addr addr; + u32 eqn; + u32 entries_num; + u32 state; + u32 arm_st; + u8 eqe_size; + u32 eq_period; + u32 coalesce_cnt; + int irqn; + u32 eqc_irqn; /* irqn for eqc */ + u32 cons_index; +}; + +struct ubase_ceq { + struct ubase_dev *udev; + struct ubase_eq eq; +}; + +struct ubase_aeq { + struct ubase_dev *udev; + struct ubase_eq eq; + struct ubase_event_nb cb[UBASE_AE_LEVEL_NUM]; +}; + +struct ubase_ceqs { + struct ubase_ceq *ceq; + u32 num; +}; + +struct ubase_irq_table { + struct mutex ceq_lock; + struct ubase_ceqs ceqs; + struct ubase_aeq aeq; + struct blocking_notifier_head nh[UBASE_DRV_MAX][UBASE_EVENT_TYPE_MAX]; + + struct ubase_irq **irqs; /* first one for misc */ + u32 irqs_num; +}; + +int ubase_register_ae_event(struct ubase_dev *udev); +void ubase_unregister_ae_event(struct ubase_dev *udev); + +#endif diff --git a/drivers/ub/ubase/ubase_main.c b/drivers/ub/ubase/ubase_main.c new file mode 100644 index 000000000000..2c97a842c3dd --- /dev/null +++ b/drivers/ub/ubase/ubase_main.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#include + +#include "debugfs/ubase_debugfs.h" + +static int __init ubase_init(void) +{ + int ret; + + ret = ubase_dbg_register_debugfs(); + if (ret) + return ret; + + ret = ubase_ubus_register_driver(); + if (ret) + ubase_dbg_unregister_debugfs(); + + return ret; +} + +static void __exit ubase_exit(void) +{ + ubase_ubus_unregister_driver(); + ubase_dbg_unregister_debugfs(); +} + +module_init(ubase_init); +module_exit(ubase_exit); + +MODULE_DESCRIPTION("UBASE: Hisilicon Network Driver"); +MODULE_IMPORT_NS(UB_UBFI); +MODULE_IMPORT_NS(UB_UBUS); +MODULE_LICENSE("GPL"); +MODULE_VERSION(UBASE_MOD_VERSION); diff --git a/drivers/ub/ubase/ubase_reset.h b/drivers/ub/ubase/ubase_reset.h new file mode 100644 index 000000000000..ae1ae9b7fdc1 --- /dev/null +++ b/drivers/ub/ubase/ubase_reset.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#ifndef __UBASE_RESET_H__ +#define __UBASE_RESET_H__ + +#include "ubase_dev.h" + +#define UBASE_RST_WAIT_TIME 100 + +#endif diff --git a/drivers/ub/ubase/ubase_ubus.c b/drivers/ub/ubase/ubase_ubus.c new file mode 100644 index 000000000000..7b05707955e0 --- /dev/null +++ b/drivers/ub/ubase/ubase_ubus.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#include +#include +#include + +#include "ubase_cmd.h" +#include "ubase_dev.h" +#include "ubase_reset.h" +#include "ubase_ubus.h" + +static const char ubase_ubus_driver_name[] = "ubase"; + +static const struct ub_device_id ubase_ubus_tbl[] = { + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_K_0_URMA_MUE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_K_0_URMA_UE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_K_0_CDMA_MUE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_K_0_CDMA_UE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_K_0_PMU_MUE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_K_0_PMU_UE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_A_0_URMA_MUE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_A_0_URMA_UE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_A_0_CDMA_MUE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_A_0_CDMA_UE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_A_0_PMU_MUE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_A_0_PMU_UE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_A_0_UBOE_MUE), 0, 0}, + {UB_ENTITY(UBASE_VENDOR_ID, UBASE_DEV_ID_A_0_UBOE_UE), 0, 0}, + /* required last entry */ + {0}, +}; +MODULE_DEVICE_TABLE(ub, ubase_ubus_tbl); + +static int ubase_ubus_init(struct ub_entity *ue) +{ +#define UBASE_UBUS_DMA_BIT 48 + + struct ubase_dev *udev = dev_get_drvdata(&ue->dev); + int ret; + + ub_entity_enable(ue, 1); + + ret = dma_set_mask_and_coherent(&ue->dev, + DMA_BIT_MASK(UBASE_UBUS_DMA_BIT)); + if (ret) { + ubase_err(udev, + "can't set consistent UBUS DMA, ret = %d.\n", ret); + goto err_enable_device; + } + + udev->hw.io_base.addr_unmapped = ub_resource_start(ue, UBASE_UBUS_IO_RESOURCE); + udev->hw.io_base.addr = ub_iomap(ue, UBASE_UBUS_IO_RESOURCE, 0); + if (!udev->hw.io_base.addr) { + ubase_err(udev, "failed to map io base.\n"); + ret = -ENOMEM; + goto err_enable_device; + } + + udev->hw.mem_base.addr_unmapped = ub_resource_start(ue, UBASE_UBUS_MEM_RESOURCE); + 
udev->hw.mem_base.addr = devm_ioremap_wc(&ue->dev, + ub_resource_start(ue, UBASE_UBUS_MEM_RESOURCE), + ub_resource_len(ue, UBASE_UBUS_MEM_RESOURCE)); + if (!udev->hw.mem_base.addr) { + ubase_err(udev, "failed to map memory base.\n"); + ret = -ENOMEM; + goto err_unmap_io_base; + } + + udev->hw.rs0_base.addr_unmapped = ub_resource_start(ue, UBASE_UBUS_RESOURCE_0); + udev->hw.rs0_base.addr = ub_iomap(ue, UBASE_UBUS_RESOURCE_0, 0); + if (!udev->hw.rs0_base.addr) { + ubase_err(udev, "failed to map resource0 addr.\n"); + ret = -ENOMEM; + goto err_unmap_mem_base; + } + + return 0; + +err_unmap_mem_base: + devm_iounmap(&ue->dev, udev->hw.mem_base.addr); + udev->hw.mem_base.addr = NULL; +err_unmap_io_base: + ub_iounmap(udev->hw.io_base.addr); + udev->hw.io_base.addr = NULL; +err_enable_device: + ub_entity_enable(ue, 0); + + return ret; +} + +static void ubase_ubus_uninit(struct ub_entity *ue) +{ + struct ubase_dev *udev = dev_get_drvdata(&ue->dev); + + if (udev->hw.io_base.addr) { + ub_iounmap(udev->hw.io_base.addr); + udev->hw.io_base.addr = NULL; + } + + if (udev->hw.mem_base.addr) { + devm_iounmap(&ue->dev, udev->hw.mem_base.addr); + udev->hw.mem_base.addr = NULL; + } + + if (udev->hw.rs0_base.addr) { + ub_iounmap(udev->hw.rs0_base.addr); + udev->hw.rs0_base.addr = NULL; + } + + ub_entity_enable(ue, 0); +} + +static void ubase_port_reset_prepare(struct ub_entity *ue, u16 port_id) +{ + struct ubase_dev *udev = dev_get_drvdata(&ue->dev); + + ubase_info(udev, "port %u reset prepare.\n", port_id); + ubase_port_down(udev); +} + +static void ubase_port_reset_done(struct ub_entity *ue, u16 port_id) +{ + struct ubase_dev *udev = dev_get_drvdata(&ue->dev); + + ubase_port_up(udev); + ubase_info(udev, "port %u reset done.\n", port_id); + udev->reset_stat.port_reset_cnt++; +} + +static struct ub_share_port_ops ubase_share_port_ops = { + .reset_prepare = ubase_port_reset_prepare, + .reset_done = ubase_port_reset_done +}; + +static int ubase_ubus_reg_share_port(struct ubase_dev *udev) 
+{ + struct ub_entity *ue = container_of(udev->dev, struct ub_entity, dev); + struct ubase_caps *caps = &udev->caps.dev_caps; + int ret; + + if (!ubase_dev_ubl_supported(udev)) + return 0; + + ret = ub_register_share_port(ue, caps->ub_port_logic_id, + &ubase_share_port_ops); + if (ret) + ubase_err(udev, + "failed to register share logical port %u, ret = %d.\n", + caps->ub_port_logic_id, ret); + + return ret; +} + +static void ubase_ubus_unreg_share_port(struct ubase_dev *udev) +{ + struct ub_entity *ue = container_of(udev->dev, struct ub_entity, dev); + struct ubase_caps *caps = &udev->caps.dev_caps; + + if (!ubase_dev_ubl_supported(udev)) + return; + + ub_unregister_share_port(ue, caps->ub_port_logic_id, + &ubase_share_port_ops); +} + +/* ubase_ubus_probe - Device initialization routine + * @ue: ub entity information struct + * @utbl_entry: entry in ubase_ubus_tbl + * + * ubase_ubus_probe initializes an UE identified by an ub_entity structure. + * + * Returns 0 on success, negative on failure + */ +static int ubase_ubus_probe(struct ub_entity *ue, + const struct ub_device_id *utbl_entry) +{ + struct ubase_dev *udev; + int ret; + + ub_set_user_info(ue); + + udev = devm_kzalloc(&ue->dev, sizeof(*udev), GFP_KERNEL); + if (!udev) { + dev_err(&ue->dev, "failed to alloc ubase dev.\n"); + return -ENOMEM; + } + + udev->dev = &ue->dev; + udev->dev_id = ubase_adev_idx_alloc(); + if (udev->dev_id < 0) { + ubase_err(udev, + "failed to alloc dev id(%d).\n", udev->dev_id); + devm_kfree(&ue->dev, udev); + return udev->dev_id; + } + + udev->caps.dev_caps.tid = ue->tid; + udev->caps.dev_caps.eid = ue->eid; + udev->caps.dev_caps.upi = ue->upi; + udev->caps.dev_caps.ctl_no = ue->ubc->ctl_no; + + dev_set_drvdata(&ue->dev, udev); + + ret = ubase_ubus_init(ue); + if (ret) { + ubase_err(udev, + "failed to init ubus, ret = %d.\n", ret); + goto err_ubus_init; + } + + ret = ubase_dev_init(udev); + if (ret) { + ubase_err(udev, + "failed to init ubase dev, ret = %d.\n", ret); + goto 
err_udev_init; + } + + ret = ubase_ubus_reg_share_port(udev); + if (ret) + goto err_register_share_port; + + return 0; + +err_register_share_port: + ubase_dev_uninit(udev); +err_udev_init: + ubase_ubus_uninit(ue); +err_ubus_init: + dev_set_drvdata(&ue->dev, NULL); + ubase_adev_idx_free(udev->dev_id); + devm_kfree(&ue->dev, udev); + ub_unset_user_info(ue); + + return ret; +} + +static void __ubase_ubus_remove(struct ub_entity *ue) +{ + struct ubase_dev *udev = dev_get_drvdata(&ue->dev); + + ubase_ubus_unreg_share_port(udev); + ubase_dev_uninit(udev); + ubase_ubus_uninit(ue); + ub_unset_user_info(ue); + ubase_adev_idx_free(udev->dev_id); + dev_set_drvdata(&ue->dev, NULL); + devm_kfree(&ue->dev, udev); +} + +/* ubase_remove - Device removal routine + * @ue: ub entity information struct + */ +static void ubase_ubus_remove(struct ub_entity *ue) +{ + struct ubase_dev *udev = dev_get_drvdata(&ue->dev); + + while (test_and_set_bit(UBASE_STATE_DISABLED_B, &udev->state_bits)) + msleep(UBASE_RST_WAIT_TIME); + + ub_disable_entities(ue); + __ubase_ubus_remove(ue); +} + +static void ubase_ubus_shutdown(struct ub_entity *ue) +{ + struct ubase_dev *udev = dev_get_drvdata(&ue->dev); + + while (test_and_set_bit(UBASE_STATE_DISABLED_B, &udev->state_bits)) + msleep(UBASE_RST_WAIT_TIME); + + ubase_dbg(udev, "ubase_shutdown start.\n"); + + __ubase_ubus_remove(ue); +} + +int ubase_ubus_irq_vectors_alloc(struct device *dev) +{ + struct ub_entity *ue = container_of(dev, struct ub_entity, dev); + struct ubase_dev *udev = dev_get_drvdata(&ue->dev); + int irqs_num; + + udev->irq_table.irqs_num = udev->caps.dev_caps.num_aeq_vectors + + udev->caps.dev_caps.num_ceq_vectors + + udev->caps.dev_caps.num_misc_vectors; + + irqs_num = ub_alloc_irq_vectors(ue, UBASE_MIN_IRQ_NUM, + udev->irq_table.irqs_num); + if (irqs_num == -ENOSPC) { + ubase_err(udev, + "bus is unable to provide sufficient number of interrupts.\n"); + goto out; + } + + if (irqs_num < 0) { + ubase_err(udev, + "failed to allocate USI 
vectors, irqs_num = %d.\n", + irqs_num); + goto out; + } + + if ((u32)irqs_num < udev->irq_table.irqs_num) { + ubase_warn(udev, + "need to allocate %u USI resource, but only allocated %d.\n", + udev->irq_table.irqs_num, irqs_num); + udev->irq_table.irqs_num = (u32)irqs_num; + } + + return 0; + +out: + ub_disable_intr(ue); + return -EFAULT; +} + +void ubase_ubus_irq_vectors_free(struct device *dev) +{ + struct ub_entity *ue = container_of(dev, struct ub_entity, dev); + + ub_disable_intr(ue); +} + +int ubase_ubus_irq_vector(struct device *dev, u32 idx) +{ + struct ub_entity *ue = container_of(dev, struct ub_entity, dev); + + return ub_irq_vector(ue, idx); +} + +static struct ub_driver ubase_ubus_driver = { + .name = ubase_ubus_driver_name, + .id_table = ubase_ubus_tbl, + .probe = ubase_ubus_probe, + .remove = ubase_ubus_remove, + .shutdown = ubase_ubus_shutdown, + .driver = {}, +}; + +int ubase_ubus_register_driver(void) +{ + return ub_register_driver(&ubase_ubus_driver); +} + +void ubase_ubus_unregister_driver(void) +{ + ub_unregister_driver(&ubase_ubus_driver); +} + +int ubase_ubus_reset_entry(struct device *dev) +{ + struct ub_entity *ue = container_of(dev, struct ub_entity, dev); + struct ubase_dev *udev = dev_get_drvdata(dev); + int ret; + + ret = ub_reset_entity(ue); + if (ret) + ubase_err(udev, "failed to trigger reset, ret = %d.\n", ret); + + return ret; +} + +void ubase_ubus_reinit(struct device *dev) +{ + struct ub_entity *ue = container_of(dev, struct ub_entity, dev); + + ub_set_user_info(ue); + ub_entity_enable(ue, 1); +} diff --git a/drivers/ub/ubase/ubase_ubus.h b/drivers/ub/ubase/ubase_ubus.h new file mode 100644 index 000000000000..3e85fe7bfaea --- /dev/null +++ b/drivers/ub/ubase/ubase_ubus.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#ifndef __UBASE_UBUS_H__ +#define __UBASE_UBUS_H__ + +#include + +#define UBASE_UBUS_IO_RESOURCE 2 +#define UBASE_UBUS_MEM_RESOURCE 1 +#define UBASE_UBUS_RESOURCE_0 0 + +#define UBASE_VENDOR_ID 0xCC08 + +#define UBASE_DEV_ID_K_0_URMA_MUE 0xA001 +#define UBASE_DEV_ID_K_0_URMA_UE 0xA002 +#define UBASE_DEV_ID_K_0_CDMA_MUE 0xA003 +#define UBASE_DEV_ID_K_0_CDMA_UE 0xA004 +#define UBASE_DEV_ID_K_0_PMU_MUE 0xA005 +#define UBASE_DEV_ID_K_0_PMU_UE 0xA006 + +#define UBASE_DEV_ID_A_0_URMA_MUE 0xD802 +#define UBASE_DEV_ID_A_0_URMA_UE 0xD803 +#define UBASE_DEV_ID_A_0_CDMA_MUE 0xD804 +#define UBASE_DEV_ID_A_0_CDMA_UE 0xD805 +#define UBASE_DEV_ID_A_0_PMU_MUE 0xD806 +#define UBASE_DEV_ID_A_0_PMU_UE 0xD807 +#define UBASE_DEV_ID_A_0_UBOE_MUE 0xD80B +#define UBASE_DEV_ID_A_0_UBOE_UE 0xD80C + +struct ubase_bus_eid; + +int ubase_ubus_register_driver(void); +void ubase_ubus_unregister_driver(void); + +int ubase_ubus_irq_vectors_alloc(struct device *dev); +void ubase_ubus_irq_vectors_free(struct device *dev); +int ubase_ubus_irq_vector(struct device *dev, u32 idx); + +int ubase_ubus_reset_entry(struct device *dev); +void ubase_ubus_reinit(struct device *dev); + +#endif diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h new file mode 100644 index 000000000000..e7089e59d1bf --- /dev/null +++ b/include/ub/ubase/ubase_comm_cmd.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#ifndef _UB_UBASE_COMM_CMD_H_ +#define _UB_UBASE_COMM_CMD_H_ + +#include +#include + +struct ubase_crq_event_nb { + u16 opcode; + void *back; + int (*crq_handler)(void *dev, void *data, u32 len); +}; + +int ubase_register_crq_event(struct auxiliary_device *aux_dev, + struct ubase_crq_event_nb *nb); +void ubase_unregister_crq_event(struct auxiliary_device *aux_dev, u16 opcode); + +#endif diff --git a/include/ub/ubase/ubase_comm_debugfs.h b/include/ub/ubase/ubase_comm_debugfs.h new file mode 100644 index 000000000000..c3d9c473d646 --- /dev/null +++ b/include/ub/ubase/ubase_comm_debugfs.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#ifndef _UB_UBASE_COMM_DEBUGFS_H_ +#define _UB_UBASE_COMM_DEBUGFS_H_ + +#include + +struct ubase_dbgfs; + +struct ubase_dbg_dentry_info { + const char *name; + struct dentry *dentry; + u32 property; + bool (*support)(struct device *dev, u32 property); +}; + +struct ubase_dbg_cmd_info { + const char *name; + int dentry_index; + u32 property; + bool (*support)(struct device *dev, u32 property); + int (*init)(struct device *dev, struct ubase_dbg_dentry_info *dirs, + struct ubase_dbgfs *dbgfs, u32 idx); + int (*read_func)(struct seq_file *s, void *data); +}; + +struct ubase_dbgfs { + struct dentry *dentry; /* dbgfs root path */ + struct ubase_dbg_cmd_info *cmd_info; + int cmd_info_size; +}; + +int ubase_dbg_create_dentry(struct device *dev, struct ubase_dbgfs *dbgfs, + struct ubase_dbg_dentry_info *dirs, u32 root_idx); + +#endif diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h new file mode 100644 index 000000000000..e1d6a3d32d6c --- /dev/null +++ b/include/ub/ubase/ubase_comm_dev.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#ifndef _UB_UBASE_COMM_DEV_H_ +#define _UB_UBASE_COMM_DEV_H_ + +#include +#include +#include + +struct iova_slot; + +#define UBASE_ADEV_NAME "ubase" + +#define UBASE_MAX_DSCP (64) +#define UBASE_MAX_SL_NUM (16U) +#define UBASE_MAX_VL_NUM (16U) +#if UBASE_MAX_VL_NUM < IEEE_8021QAZ_MAX_TCS +#error "UBASE_MAX_VL_NUM can't less than IEEE_8021QAZ_MAX_TCS" +#endif + +#define UBASE_SUP_UBL BIT(0) +#define UBASE_SUP_ETH BIT(1) +#define UBASE_SUP_UNIC BIT(2) +#define UBASE_SUP_UDMA BIT(3) +#define UBASE_SUP_CDMA BIT(4) +#define UBASE_SUP_PMU BIT(5) +#define UBASE_SUP_URMA (UBASE_SUP_UNIC | UBASE_SUP_UDMA) +#define UBASE_SUP_UBL_ETH (UBASE_SUP_UBL | UBASE_SUP_ETH) +#define UBASE_SUP_ALL (UBASE_SUP_UNIC | UBASE_SUP_UDMA | \ + UBASE_SUP_CDMA | UBASE_SUP_PMU) +#define UBASE_SUP_NO_PMU (UBASE_SUP_ALL ^ UBASE_SUP_PMU) + +enum ubase_reset_type { + UBASE_NO_RESET, + UBASE_ELR_RESET, + UBASE_UE_RESET, + UBASE_MAX_RESET, +}; + +enum ubase_reset_stage { + UBASE_RESET_STAGE_NONE, + UBASE_RESET_STAGE_DOWN, + UBASE_RESET_STAGE_UNINIT, + UBASE_RESET_STAGE_INIT, + UBASE_RESET_STAGE_UP, +}; + +struct ubase_caps { + u16 num_ceq_vectors; + u16 num_aeq_vectors; + u16 num_misc_vectors; + + u32 aeqe_depth; + u32 ceqe_depth; + u16 aeqe_size; + u16 ceqe_size; + + u32 total_ue_num; + u32 public_jetty_cnt; + u8 vl_num; + u16 rsvd_jetty_cnt; + + u8 req_vl[UBASE_MAX_VL_NUM]; + u8 resp_vl[UBASE_MAX_VL_NUM]; + + u8 packet_pattern_mode; + u8 ack_queue_num; + u8 oor_en; + u8 reorder_queue_en; + u32 on_flight_size; + u8 reorder_cap; + u8 reorder_queue_shift; + u8 at_times; + u8 ue_num; + u16 mac_stats_num; + + u32 logic_port_bitmap; + u16 ub_port_logic_id; + u16 io_port_logic_id; + u16 io_port_id; + u16 nl_port_id; + u16 chip_id; + u16 die_id; + u16 ue_id; + u16 nl_id; + + u32 tid; + u32 eid; + u16 upi; + u32 ctl_no; + + u32 fw_version; +}; + +struct ubase_res_caps { + u32 max_cnt; + u32 start_idx; + u32 reserved_cnt; + u32 depth; +}; + +struct ubase_pmem_caps { + u64 dma_len; + dma_addr_t 
dma_addr; +}; + +struct ubase_adev_caps { + struct ubase_res_caps jfs; + struct ubase_res_caps jfr; + struct ubase_res_caps jfc; + struct ubase_res_caps tp; + struct ubase_res_caps tpg; + struct ubase_pmem_caps pmem; + u32 utp_port_bitmap; /* utp port bitmap */ + u32 jtg_max_cnt; + u32 rc_max_cnt; + u32 rc_que_depth; + u32 ccc_max_cnt; + u32 dest_addr_max_cnt; + u32 seid_upi_max_cnt; + u32 tpm_max_cnt; + u16 cqe_size; +}; + +struct ubase_ctx_buf_cap { + dma_addr_t dma_ctx_buf_ba; /* pass to hw */ + struct iova_slot *slot; + u32 entry_size; + u32 entry_cnt; + u32 cnt_per_page_shift; + struct xarray ctx_xa; + struct mutex ctx_mutex; +}; + +struct ubase_ctx_buf { + struct ubase_ctx_buf_cap jfs; + struct ubase_ctx_buf_cap jfr; + struct ubase_ctx_buf_cap jfc; + struct ubase_ctx_buf_cap jtg; + struct ubase_ctx_buf_cap rc; + + struct ubase_ctx_buf_cap tp; + struct ubase_ctx_buf_cap tpg; +}; + +struct net_device; +struct ubase_adev_com { + struct auxiliary_device *adev; + struct net_device *netdev; +}; + +struct ubase_resource_space { + resource_size_t addr_unmapped; + void __iomem *addr; +}; + +struct ubase_adev_qos { + /* udma/cdma resource */ + u8 sl_num; + u8 sl[UBASE_MAX_SL_NUM]; + u8 tp_sl_num; + u8 tp_sl[UBASE_MAX_SL_NUM]; + u8 ctp_sl_num; + u8 ctp_sl[UBASE_MAX_SL_NUM]; + + u8 vl_num; + u8 vl[UBASE_MAX_VL_NUM]; + u8 tp_vl_num; + u8 tp_resp_vl_offset; + u8 tp_req_vl[UBASE_MAX_VL_NUM]; + u8 ctp_vl_num; + u8 ctp_resp_vl_offset; + u8 ctp_req_vl[UBASE_MAX_VL_NUM]; + + u8 dscp_vl[UBASE_MAX_DSCP]; + + /* unic resource */ + u8 nic_sl_num; + u8 nic_sl[UBASE_MAX_SL_NUM]; + + u8 nic_vl_num; + u8 nic_vl[UBASE_MAX_VL_NUM]; + + /* common resource */ + u8 ue_max_vl_id; + u8 ue_sl_vl[UBASE_MAX_SL_NUM]; +}; + +struct ubase_ue_caps { + u8 ceq_vector_num; + u8 aeq_vector_num; + u32 aeqe_depth; + u32 ceqe_depth; + u32 jfs_max_cnt; + u32 jfs_depth; + u32 jfr_max_cnt; + u32 jfr_depth; + u32 jfc_max_cnt; + u32 jfc_depth; + u32 rc_max_cnt; + u32 rc_depth; + u32 jtg_max_cnt; +}; + +#define 
UBASE_BUS_EID_LEN 4 +struct ubase_bus_eid { + u32 eid[UBASE_BUS_EID_LEN]; +}; + +bool ubase_adev_ubl_supported(struct auxiliary_device *adev); +bool ubase_adev_eth_mac_supported(struct auxiliary_device *adev); + +struct ubase_resource_space *ubase_get_io_base(struct auxiliary_device *adev); +struct ubase_resource_space *ubase_get_mem_base(struct auxiliary_device *adev); +struct ubase_caps *ubase_get_dev_caps(struct auxiliary_device *adev); +struct ubase_adev_caps *ubase_get_unic_caps(struct auxiliary_device *adev); +struct ubase_adev_caps *ubase_get_udma_caps(struct auxiliary_device *adev); +struct ubase_adev_caps *ubase_get_cdma_caps(struct auxiliary_device *adev); + +#endif diff --git a/include/ub/ubase/ubase_comm_eq.h b/include/ub/ubase/ubase_comm_eq.h new file mode 100644 index 000000000000..2dabcc0537ed --- /dev/null +++ b/include/ub/ubase/ubase_comm_eq.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#ifndef _UB_UBASE_COMM_EQ_H_ +#define _UB_UBASE_COMM_EQ_H_ + +#include +#include + +enum ubase_drv_type { + UBASE_DRV_UNIC, + UBASE_DRV_UDMA, + UBASE_DRV_CDMA, + UBASE_DRV_FWCTL, + UBASE_DRV_PMU, + UBASE_DRV_UVB, + UBASE_DRV_MAX, +}; + +enum ubase_event_type { + UBASE_EVENT_TYPE_RESERVED = 0x00, + UBASE_EVENT_TYPE_JETTY_LEVEL_ERROR = 0x01, + UBASE_EVENT_TYPE_TP_LEVEL_ERROR = 0x02, + UBASE_EVENT_TYPE_ENTITY_LEVEL_ERROR = 0x03, + UBASE_EVENT_TYPE_TP_FLUSH_DONE = 0x10, + UBASE_EVENT_TYPE_ENTITY_FLUSH_DONE = 0x11, + UBASE_EVENT_TYPE_JFR_LIMIT_REACHED = 0x12, + UBASE_EVENT_TYPE_MB = 0x13, + UBASE_EVENT_TYPE_CHECK_TOKEN = 0x14, + UBASE_EVENT_TYPE_MAX +}; + +struct ubase_event_nb { + enum ubase_drv_type drv_type; + u8 event_type; + struct notifier_block nb; + void *back; +}; + +int ubase_event_register(struct auxiliary_device *adev, + struct ubase_event_nb *cb); +void ubase_event_unregister(struct auxiliary_device *adev, + struct ubase_event_nb *cb); + +int ubase_comp_register(struct auxiliary_device *adev, + int (*comp_handler)(struct notifier_block *nb, + unsigned long jfcn, void *data)); +void ubase_comp_unregister(struct auxiliary_device *adev); + +#endif diff --git a/include/ub/ubase/ubase_comm_hw.h b/include/ub/ubase/ubase_comm_hw.h new file mode 100644 index 000000000000..e212b4a7d2bc --- /dev/null +++ b/include/ub/ubase/ubase_comm_hw.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#ifndef _UB_UBASE_COMM_HW_H_ +#define _UB_UBASE_COMM_HW_H_ + +#include +#include + +#define UBASE_DESC_DATA_LEN 6 +struct ubase_cmdq_desc { + __le16 opcode; + u8 flag; + u8 bd_num; + __le16 ret; + __le16 rsv; + __le32 data[UBASE_DESC_DATA_LEN]; +}; + +struct ubase_cmdq_ring { + u32 ci; + u32 pi; + u32 desc_num; + u32 tx_timeout; + dma_addr_t desc_dma_addr; + struct ubase_cmdq_desc *desc; + spinlock_t lock; +}; + +struct ubase_cmdq { + struct ubase_cmdq_ring csq; + struct ubase_cmdq_ring crq; +}; + +struct ubase_hw { + struct ubase_resource_space rs0_base; + struct ubase_resource_space io_base; + struct ubase_resource_space mem_base; + struct ubase_cmdq cmdq; + unsigned long state; +}; + +struct ubase_mbx_event_context { + struct completion done; + int result; + u64 out_param; + u16 seq_num; +}; + +#endif diff --git a/include/ub/ubase/ubase_comm_stats.h b/include/ub/ubase/ubase_comm_stats.h new file mode 100644 index 000000000000..80d32bc9273b --- /dev/null +++ b/include/ub/ubase/ubase_comm_stats.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#ifndef _UB_UBASE_COMM_STATS_H_ +#define _UB_UBASE_COMM_STATS_H_ + +#include + +struct ubase_eth_mac_stats { + u64 tx_fragment_pkts; + u64 tx_undersize_pkts; + u64 tx_undermin_pkts; + + u64 tx_64_octets_pkts; + u64 tx_65_127_octets_pkts; + u64 tx_128_255_octets_pkts; + u64 tx_256_511_octets_pkts; + u64 tx_512_1023_octets_pkts; + u64 tx_1024_1518_octets_pkts; + u64 tx_1519_2047_octets_pkts; + u64 tx_2048_4095_octets_pkts; + u64 tx_4096_8191_octets_pkts; + u64 tx_8192_9216_octets_pkts; + u64 tx_9217_12287_octets_pkts; + u64 tx_12288_16383_octets_pkts; + u64 tx_1519_max_octets_bad_pkts; + u64 tx_1519_max_octets_good_pkts; + u64 tx_oversize_pkts; + u64 tx_jabber_pkts; + u64 tx_bad_pkts; + u64 tx_bad_octets; + u64 tx_good_pkts; + u64 tx_good_octets; + u64 tx_total_pkts; + u64 tx_total_octets; + u64 tx_unicast_pkts; + u64 tx_multicast_pkts; + u64 tx_broadcast_pkts; + + u64 tx_pause_pkts; + u64 tx_pfc_pkts; + u64 tx_pri0_pfc_pkts; + u64 tx_pri1_pfc_pkts; + u64 tx_pri2_pfc_pkts; + u64 tx_pri3_pfc_pkts; + u64 tx_pri4_pfc_pkts; + u64 tx_pri5_pfc_pkts; + u64 tx_pri6_pfc_pkts; + u64 tx_pri7_pfc_pkts; + + u64 tx_mac_ctrl_pkts; + u64 tx_unfilter_pkts; + u64 tx_1588_pkts; + u64 tx_err_all_pkts; + u64 tx_from_app_good_pkts; + u64 tx_from_app_bad_pkts; + + u64 rx_fragment_pkts; + u64 rx_undersize_pkts; + u64 rx_undermin_pkts; + + u64 rx_64_octets_pkts; + u64 rx_65_127_octets_pkts; + u64 rx_128_255_octets_pkts; + u64 rx_256_511_octets_pkts; + u64 rx_512_1023_octets_pkts; + u64 rx_1024_1518_octets_pkts; + u64 rx_1519_2047_octets_pkts; + u64 rx_2048_4095_octets_pkts; + u64 rx_4096_8191_octets_pkts; + u64 rx_8192_9216_octets_pkts; + u64 rx_9217_12287_octets_pkts; + u64 rx_12288_16383_octets_pkts; + u64 rx_1519_max_octets_bad_pkts; + u64 rx_1519_max_octets_good_pkts; + + u64 rx_oversize_pkts; + u64 rx_jabber_pkts; + u64 rx_bad_pkts; + u64 rx_bad_octets; + u64 rx_good_pkts; + u64 rx_good_octets; + u64 rx_total_pkts; + u64 rx_total_octets; + u64 rx_unicast_pkts; + u64 
rx_multicast_pkts; + u64 rx_broadcast_pkts; + + u64 rx_pause_pkts; + u64 rx_pfc_pkts; + u64 rx_pri0_pfc_pkts; + u64 rx_pri1_pfc_pkts; + u64 rx_pri2_pfc_pkts; + u64 rx_pri3_pfc_pkts; + u64 rx_pri4_pfc_pkts; + u64 rx_pri5_pfc_pkts; + u64 rx_pri6_pfc_pkts; + u64 rx_pri7_pfc_pkts; + + u64 rx_mac_ctrl_pkts; + u64 rx_symbol_err_pkts; + u64 rx_fcs_err_pkts; + u64 rx_send_app_good_pkts; + u64 rx_send_app_bad_pkts; + u64 rx_unfilter_pkts; + + u64 tx_merge_frame_ass_error_pkts; + u64 tx_merge_frame_ass_ok_pkts; + u64 tx_merge_frame_frag_count; + u64 rx_merge_frame_ass_error_pkts; + u64 rx_merge_frame_ass_ok_pkts; + u64 rx_merge_frame_frag_count; + u64 rx_merge_frame_smd_error_pkts; +}; + +#endif /* _UBASE_COMM_STATS_H */ -- Gitee From 8d68017f37faefe5f545dbfd994e1b5141ec06e1 Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Fri, 12 Sep 2025 16:07:48 +0800 Subject: [PATCH 02/10] ub: ubase: support for command process drivers inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID41MP CVE: NA ------------------------------------------------------------ This patch adds support for command processing by command queue and provides interfaces to other modules, such as udma and unic. The command queue is used by the driver to interact with the IMP.
Signed-off-by: Xiaobo Zhang Signed-off-by: Xiongchuan Zhou --- drivers/ub/ubase/Makefile | 2 +- drivers/ub/ubase/ubase.h | 22 + drivers/ub/ubase/ubase_cmd.c | 858 ++++++++++++++++++++++++++++++ drivers/ub/ubase/ubase_cmd.h | 80 +++ drivers/ub/ubase/ubase_dev.c | 5 +- drivers/ub/ubase/ubase_dev.h | 7 +- drivers/ub/ubase/ubase_mailbox.c | 542 +++++++++++++++++++ drivers/ub/ubase/ubase_mailbox.h | 52 ++ include/ub/ubase/ubase_comm_cmd.h | 85 +++ include/ub/ubase/ubase_comm_dev.h | 1 + include/ub/ubase/ubase_comm_eq.h | 31 ++ include/ub/ubase/ubase_comm_mbx.h | 94 ++++ 12 files changed, 1775 insertions(+), 4 deletions(-) create mode 100644 drivers/ub/ubase/ubase_mailbox.c create mode 100644 drivers/ub/ubase/ubase_mailbox.h create mode 100644 include/ub/ubase/ubase_comm_mbx.h diff --git a/drivers/ub/ubase/Makefile b/drivers/ub/ubase/Makefile index 17069b9f624e..10adf2df0859 100644 --- a/drivers/ub/ubase/Makefile +++ b/drivers/ub/ubase/Makefile @@ -10,7 +10,7 @@ ccflags-y += -I$(srctree)/drivers/ub/ubase/debugfs MODULE_NAME := ubase UBASE_OBJS := ubase_main.o ubase_dev.o ubase_cmd.o \ - debugfs/ubase_debugfs.o ubase_eq.o ubase_ubus.o + debugfs/ubase_debugfs.o ubase_eq.o ubase_mailbox.o ubase_ubus.o $(MODULE_NAME)-objs := $(UBASE_OBJS) obj-$(CONFIG_UB_UBASE) := ubase.o diff --git a/drivers/ub/ubase/ubase.h b/drivers/ub/ubase/ubase.h index 1f1a157d8a98..dd684141890c 100644 --- a/drivers/ub/ubase/ubase.h +++ b/drivers/ub/ubase/ubase.h @@ -13,6 +13,11 @@ #define UBASE_CAP_LEN 3 #define UBASE_MAX_TCG_NUM (4) +enum ubase_service_state { + UBASE_STATE_CRQ_SERVICE_SCHED, + UBASE_STATE_CRQ_HANDLING, +}; + struct ubase_delay_work { struct delayed_work service_task; unsigned long state; @@ -42,4 +47,21 @@ enum { #define ubase_get_cap_bit(udev, nr) \ test_bit(nr, (unsigned long *)((udev)->cap_bits)) +static inline void ubase_write_reg(void __iomem *base, u32 reg, u32 value) +{ + writel(value, base + reg); +} + +static inline u32 ubase_read_reg(u8 __iomem *base, u32 reg) +{ + u8 
__iomem *reg_addr = READ_ONCE(base); + + return readl(reg_addr + reg); +} + +#define ubase_write_dev(a, reg, value) \ + ubase_write_reg((a)->io_base.addr, reg, value) +#define ubase_read_dev(a, reg) \ + ubase_read_reg((a)->io_base.addr, reg) + #endif diff --git a/drivers/ub/ubase/ubase_cmd.c b/drivers/ub/ubase/ubase_cmd.c index ab47d8ae9021..81891811dfa3 100644 --- a/drivers/ub/ubase/ubase_cmd.c +++ b/drivers/ub/ubase/ubase_cmd.c @@ -8,6 +8,864 @@ #include "ubase_cmd.h" +static int ubase_alloc_cmd_queue(struct ubase_dev *udev, + struct ubase_cmdq_ring *ring) +{ + size_t size = ring->desc_num * sizeof(struct ubase_cmdq_desc); + + ring->desc = dma_alloc_coherent(udev->dev, size, &ring->desc_dma_addr, + GFP_KERNEL); + if (!ring->desc) { + ubase_err(udev, "failed to alloc cmdq dma addr.\n"); + return -ENOMEM; + } + + return 0; +} + +static void ubase_free_cmd_queue(struct ubase_dev *udev, + struct ubase_cmdq_ring *ring) +{ + size_t size = ring->desc_num * sizeof(struct ubase_cmdq_desc); + + if (!ring->desc) + return; + + dma_free_coherent(udev->dev, size, ring->desc, ring->desc_dma_addr); + ring->desc = NULL; +} + +static int ubase_cmd_queue_init(struct ubase_dev *udev) +{ + struct ubase_cmdq_ring *csq = &udev->hw.cmdq.csq; + struct ubase_cmdq_ring *crq = &udev->hw.cmdq.crq; + int ret; + + spin_lock_init(&csq->lock); + spin_lock_init(&crq->lock); + + csq->pi = 0; + csq->ci = 0; + crq->pi = 0; + crq->ci = 0; + csq->desc_num = UBASE_CMDQ_DESC_NUM; + crq->desc_num = UBASE_CMDQ_DESC_NUM; + csq->tx_timeout = UBASE_CMDQ_TX_TIMEOUT; + + ret = ubase_alloc_cmd_queue(udev, csq); + if (ret) { + ubase_err(udev, "failed to alloc csq, ret = %d.\n", ret); + return ret; + } + + ret = ubase_alloc_cmd_queue(udev, crq); + if (ret) { + ubase_err(udev, "failed to alloc crq, ret = %d.\n", ret); + goto err_csq; + } + + return 0; + +err_csq: + ubase_free_cmd_queue(udev, csq); + return ret; +} + +static void ubase_cmd_queue_uninit(struct ubase_dev *udev) +{ + struct ubase_cmdq_ring *csq = 
&udev->hw.cmdq.csq; + struct ubase_cmdq_ring *crq = &udev->hw.cmdq.crq; + + ubase_free_cmd_queue(udev, csq); + ubase_free_cmd_queue(udev, crq); +} + +static void ubase_cmd_init_regs(struct ubase_dev *udev) +{ + struct ubase_cmdq_ring *csq = &udev->hw.cmdq.csq; + struct ubase_cmdq_ring *crq = &udev->hw.cmdq.crq; + u32 reg_val; + + spin_lock_bh(&csq->lock); + spin_lock(&crq->lock); + + /* csq init */ + ubase_write_dev(&udev->hw, UBASE_CSQ_BASEADDR_L_REG, + lower_32_bits(csq->desc_dma_addr)); + ubase_write_dev(&udev->hw, UBASE_CSQ_BASEADDR_H_REG, + upper_32_bits(csq->desc_dma_addr)); + reg_val = csq->desc_num >> UBASE_CMDQ_DESC_NUM_S; + ubase_write_dev(&udev->hw, UBASE_CSQ_DEPTH_REG, reg_val); + ubase_write_dev(&udev->hw, UBASE_CSQ_HEAD_REG, 0); + ubase_write_dev(&udev->hw, UBASE_CSQ_TAIL_REG, 0); + + /* crq init */ + ubase_write_dev(&udev->hw, UBASE_CRQ_BASEADDR_L_REG, + lower_32_bits(crq->desc_dma_addr)); + ubase_write_dev(&udev->hw, UBASE_CRQ_BASEADDR_H_REG, + upper_32_bits(crq->desc_dma_addr)); + reg_val = crq->desc_num >> UBASE_CMDQ_DESC_NUM_S; + ubase_write_dev(&udev->hw, UBASE_CRQ_DEPTH_REG, reg_val); + ubase_write_dev(&udev->hw, UBASE_CRQ_HEAD_REG, 0); + ubase_write_dev(&udev->hw, UBASE_CRQ_TAIL_REG, 0); + + spin_unlock(&crq->lock); + spin_unlock_bh(&csq->lock); +} + +static void ubase_cmd_uninit_regs(struct ubase_dev *udev) +{ + struct ubase_cmdq_ring *csq = &udev->hw.cmdq.csq; + struct ubase_cmdq_ring *crq = &udev->hw.cmdq.crq; + + spin_lock_bh(&csq->lock); + spin_lock(&crq->lock); + + ubase_write_dev(&udev->hw, UBASE_CSQ_BASEADDR_L_REG, 0); + ubase_write_dev(&udev->hw, UBASE_CSQ_BASEADDR_H_REG, 0); + ubase_write_dev(&udev->hw, UBASE_CSQ_DEPTH_REG, 0); + ubase_write_dev(&udev->hw, UBASE_CSQ_HEAD_REG, 0); + ubase_write_dev(&udev->hw, UBASE_CSQ_TAIL_REG, 0); + + ubase_write_dev(&udev->hw, UBASE_CRQ_BASEADDR_L_REG, 0); + ubase_write_dev(&udev->hw, UBASE_CRQ_BASEADDR_H_REG, 0); + ubase_write_dev(&udev->hw, UBASE_CRQ_DEPTH_REG, 0); + ubase_write_dev(&udev->hw, 
UBASE_CRQ_HEAD_REG, 0); + ubase_write_dev(&udev->hw, UBASE_CRQ_TAIL_REG, 0); + + spin_unlock(&crq->lock); + spin_unlock_bh(&csq->lock); +} + +static void ubase_write_desc_to_cmdq(struct ubase_dev *udev, + struct ubase_cmdq_desc *desc, int num) +{ + struct ubase_cmdq_ring *csq = &udev->hw.cmdq.csq; + struct ubase_cmdq_desc *desc_to_use; + int cnt = 0; + + while (cnt < num) { + desc_to_use = &csq->desc[csq->pi]; + *desc_to_use = desc[cnt]; + (csq->pi)++; + if (csq->pi >= csq->desc_num) + csq->pi = 0; + cnt++; + } + + ubase_write_dev(&udev->hw, UBASE_CSQ_TAIL_REG, csq->pi); +} + +static int ubase_remain_cmdq_space(struct ubase_cmdq_ring *csq) +{ + u32 used = (csq->pi - csq->ci + csq->desc_num) % csq->desc_num; + + return csq->desc_num - used - 1; +} + +static bool ubase_wait_for_resp(struct ubase_dev *udev) +{ + struct ubase_cmdq_ring *csq = &udev->hw.cmdq.csq; + u32 timeout = 0; + u32 ci; + + do { + ci = ubase_read_dev(&udev->hw, UBASE_CSQ_HEAD_REG); + if (ci == csq->pi) + return true; + udelay(1); + timeout++; + } while (timeout < csq->tx_timeout); + + return false; +} + +static int ubase_get_cmd_result(struct ubase_dev *udev, + struct ubase_cmdq_desc *desc, + int num, u32 sw_pi) +{ + struct ubase_cmdq_ring *csq = &udev->hw.cmdq.csq; + u32 pi = sw_pi; + int handle; + u16 ret; + + for (handle = 0; handle < num; handle++) { + desc[handle] = csq->desc[pi]; + pi++; + if (pi >= csq->desc_num) + pi = 0; + } + + if (desc->flag & UBASE_CMD_FLAG_OUT) + ret = le16_to_cpu(desc->ret); + else + ret = ETIMEDOUT; + + return -ret; +} + +static int ubase_csq_data_is_valid(struct ubase_dev *udev, u32 hw_ci) +{ + struct ubase_cmdq_ring *csq = &udev->hw.cmdq.csq; + u32 sw_ci = csq->ci; + u32 sw_pi = csq->pi; + + if (sw_pi > sw_ci) + return hw_ci >= sw_ci && hw_ci <= sw_pi; + + return hw_ci >= sw_ci || hw_ci <= sw_pi; +} + +static int ubase_csq_clean(struct ubase_dev *udev) +{ + struct ubase_cmdq_ring *csq = &udev->hw.cmdq.csq; + s64 hw_ci; + int clean; + + hw_ci = 
ubase_read_dev(&udev->hw, UBASE_CSQ_HEAD_REG); + /* Make sure head is ready before touch any data */ + rmb(); + if (!ubase_csq_data_is_valid(udev, hw_ci)) { + ubase_warn(udev, + "the cmd head is incorrect! cmd head = (%lld, %u-%u).\n", + hw_ci, csq->pi, csq->ci); + ubase_warn(udev, + "any further commands to the firmware are disabled!\n"); + set_bit(UBASE_STATE_CMD_DISABLE, &udev->hw.state); + ubase_warn(udev, + "the firmware watchdog is expected to reset soon!\n"); + return -EIO; + } + + clean = (int)((hw_ci - (s64)(csq->ci) + (s64)(csq->desc_num)) % + (s64)(csq->desc_num)); + csq->ci = hw_ci; + + return clean; +} + +int ubase_send_cmd(struct ubase_dev *udev, + struct ubase_cmdq_desc *desc, int num) +{ + struct ubase_cmdq_ring *csq = &udev->hw.cmdq.csq; + bool is_completed = false; + int cleaned; + u32 sw_pi; + int ret; + + spin_lock_bh(&csq->lock); + if (test_bit(UBASE_STATE_CMD_DISABLE, &udev->hw.state)) { + ret = -EBUSY; + goto err_unlock; + } + + if (num > ubase_remain_cmdq_space(csq)) { + csq->ci = ubase_read_dev(&udev->hw, UBASE_CSQ_HEAD_REG); + ubase_warn(udev, + "the requested space exceeds the remaining space.\n"); + ret = -EBUSY; + goto err_unlock; + } + + /** + * Record the location of desc in the ring for this time + * which will be use for hardware to write back + */ + sw_pi = csq->pi; + + ubase_write_desc_to_cmdq(udev, desc, num); + is_completed = ubase_wait_for_resp(udev); + if (!is_completed) { + ret = -EBADE; + goto err_clr_cmdq; + } + ret = ubase_get_cmd_result(udev, desc, num, sw_pi); + +err_clr_cmdq: + cleaned = ubase_csq_clean(udev); + if (cleaned < 0) + ret = cleaned; + else if (cleaned != num) + ubase_warn(udev, + "cleaned %dBD, need to clean %dBD.\n", cleaned, num); +err_unlock: + spin_unlock_bh(&csq->lock); + + return ret; +} + +static int ubase_cmd_query_version(struct ubase_dev *udev, u32 *fw_version) +{ + struct ubase_query_version_cmd *resp; + struct ubase_cmdq_desc desc; + int ret; + + ubase_cmd_setup_basic_desc(&desc, 
UBASE_OPC_QUERY_FW_VER, true, 1); + ret = ubase_send_cmd(udev, &desc, 1); + if (ret) { + ubase_err(udev, "failed to query fw version, ret = %d.\n", + ret); + return ret; + } + + resp = (struct ubase_query_version_cmd *)desc.data; + *fw_version = le32_to_cpu(resp->firmware); + + ubase_info(udev, "The firmware version is %u.%u.%u.%u\n", + u32_get_bits(*fw_version, UBASE_FW_VERSION_BYTE3_MASK), + u32_get_bits(*fw_version, UBASE_FW_VERSION_BYTE2_MASK), + u32_get_bits(*fw_version, UBASE_FW_VERSION_BYTE1_MASK), + u32_get_bits(*fw_version, UBASE_FW_VERSION_BYTE0_MASK)); + + return 0; +} + +static inline void ubase_crq_table_init(struct ubase_dev *udev) +{ + struct ubase_crq_table *crq_table = &udev->crq_table; + + mutex_init(&crq_table->lock); + INIT_LIST_HEAD(&crq_table->nbs.list); +} + +static inline void ubase_crq_table_uninit(struct ubase_dev *udev) +{ + struct ubase_crq_table *crq_table = &udev->crq_table; + + mutex_destroy(&crq_table->lock); +} + +int ubase_cmd_init(struct ubase_dev *udev) +{ + int ret; + + if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) + ubase_crq_table_init(udev); + + ret = ubase_cmd_queue_init(udev); + if (ret) { + ubase_err(udev, "failed to init ubase cmd queue.\n"); + goto err_queue_init; + } + + ubase_cmd_init_regs(udev); + + clear_bit(UBASE_STATE_CMD_DISABLE, &udev->hw.state); + + ret = ubase_cmd_query_version(udev, &udev->caps.dev_caps.fw_version); + if (ret) + goto err_query_version; + + return 0; + +err_query_version: + set_bit(UBASE_STATE_CMD_DISABLE, &udev->hw.state); + ubase_cmd_uninit_regs(udev); + ubase_cmd_queue_uninit(udev); +err_queue_init: + if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) + ubase_crq_table_uninit(udev); + + return ret; +} + +void ubase_cmd_disable(struct ubase_dev *udev) +{ + __ubase_cmd_disable(udev); + /* wait to ensure the firmware completes csq commands. 
*/ + msleep(UBASE_CMDQ_CLEAR_WAIT_TIME); + + ubase_cmd_uninit_regs(udev); +} + +void ubase_cmd_uninit(struct ubase_dev *udev) +{ + if (udev->reset_stage != UBASE_RESET_STAGE_UNINIT) { + ubase_cmd_disable(udev); + /* wait to ensure the firmware completes crq commands. */ + msleep(UBASE_CMDQ_CLEAR_WAIT_TIME); + } + + ubase_cmd_queue_uninit(udev); + + if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) + ubase_crq_table_uninit(udev); +} + +void ubase_cmd_setup_basic_desc(struct ubase_cmdq_desc *desc, + enum ubase_opcode_type opcode, bool is_read, + u8 num) +{ + memset(desc, 0, sizeof(*desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flag = UBASE_CMD_FLAG_NO_INTR | UBASE_CMD_FLAG_IN; + desc->bd_num = num; + + if (is_read) + desc->flag |= cpu_to_le16(UBASE_CMD_FLAG_WR); +} + +static u16 ubase_calc_bd_num(struct ubase_cmd_buf *buf) +{ + u32 data_size = buf->data_size > UBASE_CMD_DATA_LENGTH ? + buf->data_size - UBASE_CMD_DATA_LENGTH : 0; + + return DIV_ROUND_UP(data_size, sizeof(struct ubase_cmdq_desc)) + 1; +} + +static int ubase_cmd_buf_check(struct ubase_dev *udev, struct ubase_cmd_buf *in, + struct ubase_cmd_buf *out, u16 *num) +{ + if (in->is_read && (!out || !out->data || out->data_size == 0)) { + ubase_err(udev, "output buffer is empty.\n"); + return -EINVAL; + } + + *num = max_t(u16, ubase_calc_bd_num(in), ubase_calc_bd_num(out)); + if (*num > UBASE_CMDQ_DESC_NUM) { + ubase_err(udev, + "requested space(%u) exceeds the maximum(%lu).\n", + max_t(u32, in->data_size, out->data_size), + UBASE_CMD_MAX_DESC_SIZE); + return -EINVAL; + } + + return 0; +} + +static void ubase_cmd_setup_desc_by_inbuf(struct ubase_dev *udev, + struct ubase_cmd_buf *in, + struct ubase_cmdq_desc *desc, + u16 num) +{ + ubase_cmd_setup_basic_desc(&desc[0], in->opcode, in->is_read, num); + if (in->data) + memcpy(desc->data, in->data, in->data_size); +} + +static void ubase_cmd_setup_desc_by_outbuf(struct ubase_dev *udev, + struct ubase_cmd_buf *out, + struct ubase_cmdq_desc *desc) 
+{ + if (!out || out->data_size == 0) + return; + + out->opcode = desc[0].opcode; + out->is_read = (desc[0].flag & UBASE_CMD_FLAG_WR) ? true : false; + memcpy(out->data, desc->data, out->data_size); +} + +static int ubase_cmd_send_inout_real(struct ubase_dev *udev, + struct ubase_cmd_buf *in, + struct ubase_cmd_buf *out) +{ + struct ubase_cmdq_desc *desc; + u16 num; + int ret; + + ret = ubase_cmd_buf_check(udev, in, out, &num); + if (ret) + return ret; + + desc = kcalloc(num, sizeof(struct ubase_cmdq_desc), GFP_ATOMIC); + if (!desc) { + ubase_err(udev, "failed to alloc desc, size = %lu.\n", + num * sizeof(struct ubase_cmdq_desc)); + return -ENOMEM; + } + + ubase_cmd_setup_desc_by_inbuf(udev, in, desc, num); + + ret = ubase_send_cmd(udev, desc, num); + if (ret) + goto err_send_cmd; + + ubase_cmd_setup_desc_by_outbuf(udev, out, desc); + +err_send_cmd: + kfree(desc); + + return ret; +} + +static void ubase_free_bd_data(void *msg_data, u32 bd_num) +{ + if (!msg_data || bd_num <= 1) + return; + + kfree(msg_data); +} + +static void ubase_cmd_exec_callback(struct ubase_dev *udev, u16 opcode, + void *msg_data, u32 msg_data_len) +{ + struct ubase_crq_table *crq_table = &udev->crq_table; + struct ubase_crq_event_nbs *nbs; + + if (!msg_data) + return; + + mutex_lock(&crq_table->lock); + list_for_each_entry(nbs, &crq_table->nbs.list, list) { + if (nbs->nb.opcode == opcode) { + nbs->nb.crq_handler(nbs->nb.back, msg_data, + msg_data_len); + break; + } + } + mutex_unlock(&crq_table->lock); +} + +static void ubase_gen_multi_bd_data(struct ubase_dev *udev, u32 bd_num, + void **msg_data, u32 msg_data_len) +{ + struct ubase_cmdq_ring *crq = &udev->hw.cmdq.crq; + struct ubase_cmdq_desc *desc; + u32 pos = 0; + u32 i; + + *msg_data = kzalloc(msg_data_len, GFP_KERNEL); + if (!(*msg_data)) { + ubase_err(udev, "failed to alloc crq msg data."); + return; + } + + for (i = 0; i < bd_num; i++) { + desc = &crq->desc[crq->ci]; + if (i == 0) { + memcpy(*msg_data + pos, + desc->data, 
UBASE_CMD_DATA_LENGTH); + pos += UBASE_CMD_DATA_LENGTH; + } else { + memcpy(*msg_data + pos, desc, sizeof(*desc)); + pos += sizeof(*desc); + } + + UBASE_MOVE_CRQ_RING_PTR(crq); + } +} + +static void ubase_gen_single_bd_data(struct ubase_dev *udev, void **msg_data) +{ + struct ubase_cmdq_ring *crq = &udev->hw.cmdq.crq; + struct ubase_cmdq_desc *desc; + + desc = &crq->desc[crq->ci]; + *msg_data = crq->desc[crq->ci].data; + UBASE_MOVE_CRQ_RING_PTR(crq); +} + +static void ubase_gen_bd_data(struct ubase_dev *udev, u32 bd_num, + void **msg_data, u32 msg_data_len) +{ + if (bd_num == 1) { + ubase_gen_single_bd_data(udev, msg_data); + return; + } + + ubase_gen_multi_bd_data(udev, bd_num, msg_data, msg_data_len); +} + +static bool ubase_cmd_crq_empty(struct ubase_dev *udev, struct ubase_hw *hw) +{ + hw->cmdq.crq.pi = ubase_read_dev(hw, UBASE_CRQ_TAIL_REG); + + return hw->cmdq.crq.pi == hw->cmdq.crq.ci; +} + +void ubase_cmd_crq_handler(struct ubase_dev *udev) +{ + struct ubase_cmdq_ring *crq = &udev->hw.cmdq.crq; + u32 msg_data_len; + void *msg_data; + u16 opcode; + u8 bd_num; + u8 flag; + + while (!ubase_cmd_crq_empty(udev, &udev->hw)) { + if (test_bit(UBASE_STATE_CMD_DISABLE, &udev->hw.state)) { + ubase_warn(udev, + "command queue needs re-initializing.\n"); + return; + } + + opcode = crq->desc[crq->ci].opcode; + bd_num = crq->desc[crq->ci].bd_num; + flag = crq->desc[crq->ci].flag; + msg_data_len = bd_num * sizeof(struct ubase_cmdq_desc) - + UBASE_CMD_HEADER_LENGTH; + + if (unlikely(!bd_num || !(flag & UBASE_CMD_FLAG_OUT))) { + ubase_err(udev, + "drop invalid crq message, opcode = 0x%x, bd_num = %u, flag = 0x%x.", + opcode, bd_num, flag); + UBASE_MOVE_CRQ_RING_PTR(crq); + continue; + } + + ubase_gen_bd_data(udev, bd_num, &msg_data, msg_data_len); + + ubase_cmd_exec_callback(udev, opcode, msg_data, + msg_data_len); + + ubase_free_bd_data(msg_data, bd_num); + } + + ubase_write_dev(&udev->hw, UBASE_CRQ_HEAD_REG, crq->ci); +} + +void ubase_crq_service_task(struct 
ubase_delay_work *ubase_work) +{ + struct ubase_dev *udev = container_of(ubase_work, struct ubase_dev, + service_task); + struct ubase_crq_table *crq_table = &udev->crq_table; + + if (!test_and_clear_bit(UBASE_STATE_CRQ_SERVICE_SCHED, + &udev->service_task.state) || + test_and_set_bit(UBASE_STATE_CRQ_HANDLING, + &udev->service_task.state)) + return; + + if (time_is_before_eq_jiffies(crq_table->last_crq_scheduled + + UBASE_CRQ_SCHED_TIMEOUT)) + ubase_warn(udev, + "crq service task is scheduled after %ums on cpu%d!\n", + jiffies_to_msecs(jiffies - crq_table->last_crq_scheduled), + smp_processor_id()); + + ubase_cmd_crq_handler(udev); + + clear_bit(UBASE_STATE_CRQ_HANDLING, &udev->service_task.state); +} + +static bool ubase_cmd_is_mbx_avail(struct ubase_dev *udev) +{ + return true; +} + +int ubase_cmd_mbx_event_cb(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct ubase_event_nb *ev_nb = container_of(nb, struct ubase_event_nb, nb); + struct ubase_aeq_notify_info *info = data; + struct ubase_aeqe *aeqe = info->aeqe; + struct ubase_dev *udev = ev_nb->back; + struct ubase_mbx_event_context *ctx; + + ctx = &udev->mb_cmd.ctx; + if (aeqe->event.cmd.seq_num != ctx->seq_num) { + ubase_err(udev, + "mbx seq num is different, cmd seq_num = %u, ctx seq_num = %u.\n", + aeqe->event.cmd.seq_num, ctx->seq_num); + return NOTIFY_DONE; + } + + ctx->result = aeqe->event.cmd.status == 0 ? 
0 : -EIO; + ctx->out_param = aeqe->event.cmd.out_param; + + complete(&ctx->done); + + return NOTIFY_OK; +} + +static int ubase_cmd_wait_mbx_completed(struct ubase_dev *udev, + union ubase_mbox *mbx) +{ + struct ubase_mbx_event_context *ctx = &udev->mb_cmd.ctx; + int ret; + + if (!wait_for_completion_timeout(&ctx->done, + msecs_to_jiffies(UBASE_CMDQ_MBX_TX_TIMEOUT))) { + ubase_err(udev, + "cmd seq_num 0x%x mailbox cmd code 0x%x timeout.\n", + ctx->seq_num, mbx->cmd); + return -EBUSY; + } + + ret = ctx->result; + if (ret) + ubase_err(udev, + "cmd seq_num(0x%x) mailbox cmd code(0x%x) error, ret = %d.\n", + ctx->seq_num, mbx->cmd, ret); + + return ret; +} + +static void ubase_setup_mbx_info(struct ubase_dev *udev, union ubase_mbox *mbx) +{ + mbx->seq_num = ++udev->mb_cmd.ctx.seq_num; + mbx->event_en = 1; +} + +int ubase_post_mailbox_by_event(struct ubase_dev *udev, + struct ubase_cmd_buf *in, + struct ubase_cmd_buf *out) +{ + union ubase_mbox *mbx = (union ubase_mbox *)in->data; + unsigned long end; + int ret; + + if (!mbx) { + ubase_err(udev, "input mailbox data field is empty.\n"); + return -EINVAL; + } + + ubase_setup_mbx_info(udev, mbx); + + end = msecs_to_jiffies(UBASE_CMDQ_MBX_TX_TIMEOUT) + jiffies; + while (ubase_cmd_is_mbx_avail(udev)) { + ret = __ubase_cmd_send_inout(udev, in, out); + if (!ret) + break; + + if (time_after(jiffies, end)) { + dev_err_ratelimited(udev->dev, + "failed to wait mbox.\n"); + return -ETIMEDOUT; + } + + cond_resched(); + } + + return ubase_cmd_wait_mbx_completed(udev, mbx); +} + +int __ubase_cmd_send_inout(struct ubase_dev *udev, struct ubase_cmd_buf *in, + struct ubase_cmd_buf *out) +{ + if (!in) { + ubase_err(udev, "input buffer is empty.\n"); + return -EINVAL; + } + + if (udev->reset_stage == UBASE_RESET_STAGE_UNINIT) + return -EAGAIN; + + return ubase_cmd_send_inout_real(udev, in, out); +} + +int __ubase_cmd_send_in(struct ubase_dev *udev, struct ubase_cmd_buf *in) +{ + struct ubase_cmd_buf out; + + out.data_size = 0; + + return 
__ubase_cmd_send_inout(udev, in, &out); +} + +/** + * When uninstalling, cmdq needs to be successfully sended as much as possible, + * but the cmd may be disabled during reset, this interface attempts to send cmd + * when it is enabled. + */ +int ubase_cmd_send_inout_ex(struct auxiliary_device *aux_dev, + struct ubase_cmd_buf *in, struct ubase_cmd_buf *out, + u32 time_out) +{ + struct ubase_dev *udev; + u32 try_cnt = 0; + + if (!aux_dev || !in || !out) + return -EINVAL; + + udev = ubase_get_udev_by_adev(aux_dev); + if (!time_out) + return __ubase_cmd_send_inout(udev, in, out); + + while (test_bit(UBASE_STATE_CMD_DISABLE, &udev->hw.state) && + (try_cnt * UBASE_CMDQ_WAIT_TIME) < time_out) { + msleep(UBASE_CMDQ_WAIT_TIME); + try_cnt++; + } + + if ((try_cnt * UBASE_CMDQ_WAIT_TIME) >= time_out) { + ubase_warn(udev, + "cmd send timeout, due to cmd enter disable state for %ums.\n", + try_cnt * UBASE_CMDQ_WAIT_TIME); + return -EBUSY; + } + + return __ubase_cmd_send_inout(udev, in, out); +} +EXPORT_SYMBOL(ubase_cmd_send_inout_ex); + +int ubase_cmd_send_inout(struct auxiliary_device *aux_dev, + struct ubase_cmd_buf *in, + struct ubase_cmd_buf *out) +{ + if (!aux_dev || !in || !out) + return -EINVAL; + + return __ubase_cmd_send_inout(__ubase_get_udev_by_adev(aux_dev), in, out); +} +EXPORT_SYMBOL(ubase_cmd_send_inout); + +int ubase_cmd_send_in_ex(struct auxiliary_device *aux_dev, + struct ubase_cmd_buf *in, u32 time_out) +{ + struct ubase_cmd_buf out; + + if (!aux_dev || !in) + return -EINVAL; + + out.data_size = 0; + + return ubase_cmd_send_inout_ex(aux_dev, in, &out, time_out); +} +EXPORT_SYMBOL(ubase_cmd_send_in_ex); + +int ubase_cmd_send_in(struct auxiliary_device *aux_dev, + struct ubase_cmd_buf *in) +{ + if (!aux_dev || !in) + return -EINVAL; + + return __ubase_cmd_send_in(__ubase_get_udev_by_adev(aux_dev), in); +} +EXPORT_SYMBOL(ubase_cmd_send_in); + +static int __ubase_cmd_get_data_size(struct ubase_dev *udev, u16 opcode, + u16 *data_size) +{ + struct ubase_cmdq_desc 
desc; + u8 bd_num; + int ret; + + ubase_cmd_setup_basic_desc(&desc, opcode, true, 1); + desc.flag |= UBASE_CMD_FLAG_GET_BD_NUM; + + ret = ubase_send_cmd(udev, &desc, 1); + if (ret) { + ubase_err(udev, + "failed to send cmd in get cmd data size, opcode = 0x%x, ret = %d.\n", + opcode, ret); + return ret; + } + + bd_num = *(u8 *)desc.data; + if (unlikely(!bd_num)) { + ubase_err(udev, + "failed to get cmd data size, bd_num = %u, opcode = 0x%x.\n", + bd_num, opcode); + return -EIO; + } + + *data_size = bd_num * sizeof(desc) - UBASE_CMD_HEADER_LENGTH; + + return 0; +} + +int ubase_cmd_get_data_size(struct auxiliary_device *aux_dev, u16 opcode, + u16 *data_size) +{ + if (!aux_dev || !data_size) + return -EINVAL; + + return __ubase_cmd_get_data_size(__ubase_get_udev_by_adev(aux_dev), + opcode, data_size); +} +EXPORT_SYMBOL(ubase_cmd_get_data_size); + int __ubase_register_crq_event(struct ubase_dev *udev, struct ubase_crq_event_nb *nb) { diff --git a/drivers/ub/ubase/ubase_cmd.h b/drivers/ub/ubase/ubase_cmd.h index 0f1d92ebe511..70c3f4d00719 100644 --- a/drivers/ub/ubase/ubase_cmd.h +++ b/drivers/ub/ubase/ubase_cmd.h @@ -11,8 +11,88 @@ #include "ubase_dev.h" +#define UBASE_CMDQ_DESC_NUM_S 3 +#define UBASE_CMDQ_DESC_NUM 1024 +#define UBASE_CMDQ_TX_TIMEOUT 3000000 +#define UBASE_CMDQ_MBX_TX_TIMEOUT 30000 +#define UBASE_CMDQ_CLEAR_WAIT_TIME 200 +#define UBASE_CMDQ_WAIT_TIME 10 + +#define UBASE_CMD_FLAG_IN BIT(0) +#define UBASE_CMD_FLAG_OUT BIT(1) +#define UBASE_CMD_FLAG_NEXT BIT(2) +#define UBASE_CMD_FLAG_WR BIT(3) +#define UBASE_CMD_FLAG_NO_INTR BIT(4) +#define UBASE_CMD_FLAG_ERR_INTR BIT(5) +#define UBASE_CMD_FLAG_GET_BD_NUM BIT(6) + +#define UBASE_UE2UE_MSG_WAIT_TIME 3000 + +#define UBASE_CRQ_SCHED_TIMEOUT (HZ / 2) + +#define UBASE_CMD_HEADER_LENGTH 8 +#define UBASE_CMD_DATA_LENGTH (UBASE_DESC_DATA_LEN * sizeof(__le32)) +#define UBASE_CMD_MAX_DESC_SIZE \ + (UBASE_CMDQ_DESC_NUM * sizeof(struct ubase_cmdq_desc)) + +#define UBASE_MOVE_CRQ_RING_PTR(crq) \ + ((crq)->ci = 
((crq)->ci + 1) % (crq)->desc_num) + +enum ubase_cmd_state { + UBASE_STATE_CMD_DISABLE +}; + +struct ubase_query_version_cmd { + __le32 firmware; + __le32 hardware; + __le32 rsv; + __le32 caps[UBASE_CAP_LEN]; +}; + +static inline void __ubase_fill_inout_buf(struct ubase_cmd_buf *buf, u16 opcode, + bool is_read, u32 data_size, void *data) +{ + buf->opcode = opcode; + buf->is_read = is_read; + buf->data_size = data_size; + buf->data = data; +} + +int ubase_cmd_init(struct ubase_dev *udev); +void ubase_cmd_uninit(struct ubase_dev *udev); + +static inline void __ubase_cmd_enable(struct ubase_dev *udev) +{ + clear_bit(UBASE_STATE_CMD_DISABLE, &udev->hw.state); +} + +static inline void __ubase_cmd_disable(struct ubase_dev *udev) +{ + set_bit(UBASE_STATE_CMD_DISABLE, &udev->hw.state); +} + +void ubase_cmd_disable(struct ubase_dev *udev); + +void ubase_cmd_setup_basic_desc(struct ubase_cmdq_desc *desc, + enum ubase_opcode_type opcode, bool is_read, + u8 num); +int ubase_send_cmd(struct ubase_dev *udev, + struct ubase_cmdq_desc *desc, int num); + +int ubase_post_mailbox_by_event(struct ubase_dev *udev, + struct ubase_cmd_buf *in, + struct ubase_cmd_buf *out); +int __ubase_cmd_send_in(struct ubase_dev *udev, struct ubase_cmd_buf *in); +int __ubase_cmd_send_inout(struct ubase_dev *udev, struct ubase_cmd_buf *in, + struct ubase_cmd_buf *out); + +int ubase_cmd_mbx_event_cb(struct notifier_block *nb, unsigned long action, + void *data); + int __ubase_register_crq_event(struct ubase_dev *udev, struct ubase_crq_event_nb *nb); void __ubase_unregister_crq_event(struct ubase_dev *udev, u16 opcode); +void ubase_crq_service_task(struct ubase_delay_work *ubase_work); + #endif diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index be052e9cad61..8c102000e7ef 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -9,6 +9,7 @@ #include "debugfs/ubase_debugfs.h" #include "ubase_cmd.h" +#include "ubase_mailbox.h" #include "ubase_dev.h" #define 
UBASE_PERIOD_100MS 100 @@ -435,7 +436,7 @@ static const struct ubase_init_function ubase_init_func_map[] = { }, { "init cmd queue", UBASE_SUP_ALL, 1, - NULL, NULL + ubase_cmd_init, ubase_cmd_uninit }, { "query dev res", UBASE_SUP_ALL, 0, @@ -443,7 +444,7 @@ static const struct ubase_init_function ubase_init_func_map[] = { }, { "init mailbox", UBASE_SUP_NO_PMU, 0, - NULL, NULL + ubase_mbox_cmd_init, ubase_mbox_cmd_uninit }, { "query chip info", UBASE_SUP_ALL, 0, diff --git a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h index fe587046b291..ee59ac41c8be 100644 --- a/drivers/ub/ubase/ubase_dev.h +++ b/drivers/ub/ubase/ubase_dev.h @@ -83,6 +83,12 @@ struct ubase_dma_buf { size_t size; }; +struct ubase_ctx_page { + dma_addr_t iova; + u32 npage; + refcount_t refcount; +}; + struct ubase_ta_layer_ctx { struct ubase_dma_buf extdb_buf; struct ubase_dma_buf timer_buf; @@ -242,7 +248,6 @@ static inline struct ubase_dev *__ubase_get_udev_by_adev(struct auxiliary_device *adev) { struct ubase_adev *uadev = container_of(adev, struct ubase_adev, adev); - return uadev->udev; } diff --git a/drivers/ub/ubase/ubase_mailbox.c b/drivers/ub/ubase/ubase_mailbox.c new file mode 100644 index 000000000000..9951929a7f8d --- /dev/null +++ b/drivers/ub/ubase/ubase_mailbox.c @@ -0,0 +1,542 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#include +#include +#include + +#include "ubase_cmd.h" +#include "ubase_mailbox.h" + +int ubase_mbox_cmd_init(struct ubase_dev *udev) +{ + struct ubase_mbx_event_context *ctx = &udev->mb_cmd.ctx; + + udev->mb_cmd.pool = dma_pool_create("ubase_mbox", udev->dev, + UBASE_MAILBOX_SIZE, + UBASE_MAILBOX_SIZE, 0); + if (!udev->mb_cmd.pool) + return -ENOMEM; + + sema_init(&udev->mb_cmd.sem, 1); + init_completion(&ctx->done); + + return 0; +} + +void ubase_mbox_cmd_uninit(struct ubase_dev *udev) +{ + if (!udev->mb_cmd.pool) + return; + + dma_pool_destroy(udev->mb_cmd.pool); + udev->mb_cmd.pool = NULL; +} + +struct ubase_cmd_mailbox *__ubase_alloc_cmd_mailbox(struct ubase_dev *udev) +{ + struct ubase_cmd_mailbox *mailbox; + + if (!udev->mb_cmd.pool) { + ubase_err(udev, "failed to alloc mailbox, pool is null.\n"); + return NULL; + } + + mailbox = kzalloc(sizeof(*mailbox), GFP_KERNEL); + if (!mailbox) { + ubase_err(udev, "failed to alloc mailbox.\n"); + goto failed_alloc_mailbox; + } + + mailbox->buf = dma_pool_zalloc(udev->mb_cmd.pool, GFP_KERNEL, + &mailbox->dma); + if (!mailbox->buf) { + ubase_err(udev, "failed to alloc buffer of mailbox.\n"); + goto failed_alloc_mailbox_buf; + } + + return mailbox; + +failed_alloc_mailbox_buf: + kfree(mailbox); +failed_alloc_mailbox: + return NULL; +} + +struct ubase_cmd_mailbox *ubase_alloc_cmd_mailbox(struct auxiliary_device *aux_dev) +{ + struct ubase_dev *udev; + + if (!aux_dev) + return NULL; + + udev = __ubase_get_udev_by_adev(aux_dev); + + return __ubase_alloc_cmd_mailbox(udev); +} +EXPORT_SYMBOL(ubase_alloc_cmd_mailbox); + +void __ubase_free_cmd_mailbox(struct ubase_dev *udev, + struct ubase_cmd_mailbox *mailbox) +{ + if (!mailbox) { + ubase_err(udev, "Invalid mailbox.\n"); + return; + } + + dma_pool_free(udev->mb_cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} + +void ubase_free_cmd_mailbox(struct auxiliary_device *aux_dev, + struct ubase_cmd_mailbox *mailbox) +{ + struct ubase_dev *udev; + + if (!aux_dev) 
+ return; + + udev = __ubase_get_udev_by_adev(aux_dev); + + __ubase_free_cmd_mailbox(udev, mailbox); +} +EXPORT_SYMBOL(ubase_free_cmd_mailbox); + +static int ubase_post_mailbox(struct ubase_dev *udev, + struct ubase_cmdq_desc *desc, + u32 timeout, u8 *complete_status, bool is_read) +{ + union ubase_mbox *mb; + unsigned long end; + int ret; + + end = msecs_to_jiffies(timeout) + jiffies; + mb = (union ubase_mbox *)desc->data; + + while (1) { + desc->flag = cpu_to_le16(UBASE_CMD_FLAG_NO_INTR | UBASE_CMD_FLAG_IN); + if (is_read) + desc->flag |= cpu_to_le16(UBASE_CMD_FLAG_WR); + + ret = ubase_send_cmd(udev, desc, 1); + if (!ret && !(is_read ? mb->query_hw_run : mb->hw_run)) + break; + + if (time_after(jiffies, end)) { + dev_err_ratelimited(udev->dev, + "failed to wait mbox.\n"); + return -ETIMEDOUT; + } + + cond_resched(); + } + + if (!ret) + *complete_status = + (u8)(is_read ? mb->query_status : mb->status); + + return ret; +} + +static int ubase_poll_mbox_done(struct ubase_dev *udev, uint32_t timeout) +{ + struct ubase_cmdq_desc desc; + u8 status = 0; + int ret; + + ubase_cmd_setup_basic_desc(&desc, UBASE_OPC_QUERY_MB_ST, true, 1); + + ret = ubase_post_mailbox(udev, &desc, timeout, &status, true); + if (!ret) { + if (!status) { + ubase_info(udev, + "failed to query ubase mailbox, status = %u.\n", + status); + return -EBUSY; + } + } else + dev_err_ratelimited(udev->dev, + "failed to check mbox status = %u, ret = %d.\n", + status, ret); + + return ret; +} + +static void ubase_mbox_desc_init(struct ubase_dev *udev, union ubase_mbox *mb, + u64 in_param, struct ubase_mbx_attr *attr) +{ + memset(mb, 0, sizeof(*mb)); + mb->in_param_l = cpu_to_le32(lower_32_bits(in_param)); + mb->in_param_h = cpu_to_le32(upper_32_bits(in_param)); + mb->cmd = attr->op; + mb->tag = attr->tag; + mb->mbx_ue_id = attr->mbx_ue_id; +} + +static int ubase_cmd_mbox_poll(struct ubase_dev *udev, + struct ubase_mbx_attr *attr, + struct ubase_cmd_mailbox *mailbox) +{ + struct ubase_cmdq_desc desc; + union 
ubase_mbox *mb; + u8 status = 0; + int ret; + + if (udev->reset_stage == UBASE_RESET_STAGE_UNINIT) + return 0; + + mb = (union ubase_mbox *)desc.data; + + ubase_cmd_setup_basic_desc(&desc, UBASE_OPC_POST_MB, false, 1); + + ubase_mbox_desc_init(udev, mb, (u64)mailbox->dma, attr); + + mb->seq_num = CMD_MBX_POLL_VALUE; + + ret = ubase_post_mailbox(udev, &desc, UBASE_MBX_TX_TIMEOUT, &status, + false); + if (ret) { + ubase_err(udev, + "failed to post mailbox 0x%x in poll mode, status = %u, ret = %d.\n", + attr->op, status, ret); + return ret; + } + + return ubase_poll_mbox_done(udev, UBASE_MBX_TX_TIMEOUT); +} + +static int ubase_cmd_mbox_event(struct ubase_dev *udev, + struct ubase_mbx_attr *attr, + struct ubase_cmd_mailbox *mailbox) +{ + union ubase_mbox mb_out = {0}; + union ubase_mbox mb_in = {0}; + struct ubase_cmd_buf in, out; + int ret; + + if (udev->reset_stage == UBASE_RESET_STAGE_UNINIT) + return 0; + + ubase_mbox_desc_init(udev, &mb_in, (u64)mailbox->dma, attr); + + mb_in.event_en = 1; + __ubase_fill_inout_buf(&in, UBASE_OPC_POST_MB, false, + sizeof(union ubase_mbox), (void *)&mb_in); + __ubase_fill_inout_buf(&out, UBASE_OPC_POST_MB, false, + sizeof(union ubase_mbox), (void *)&mb_out); + + ret = ubase_post_mailbox_by_event(udev, &in, &out); + if (ret) + ubase_err(udev, + "failed to post mailbox 0x%x in event mode, ret = %d.\n", + attr->op, ret); + + return ret; +} + +int ubase_create_ctx_page(struct ubase_dev *udev, + struct ubase_ctx_buf_cap *ctx_buf, + struct ubase_ctx_page **ctx_page, u32 npage) +{ + int ret; + + *ctx_page = kzalloc(sizeof(struct ubase_ctx_page), GFP_KERNEL); + if (!(*ctx_page)) + return -ENOMEM; + + (*ctx_page)->iova = ctx_buf->dma_ctx_buf_ba + npage * PAGE_SIZE; + refcount_set(&(*ctx_page)->refcount, 1); + (*ctx_page)->npage = npage; + ret = ummu_fill_pages(ctx_buf->slot, (*ctx_page)->iova, + UBASE_IOVA_COMM_PFN_CNT); + if (ret) { + ubase_err(udev, "failed to fill pages in ummu, ret = %d\n", ret); + kfree(*ctx_page); + *ctx_page = NULL; + 
} + + return ret; +} + +void ubase_destroy_ctx_page(struct ubase_dev *udev, + struct ubase_ctx_page *ctx_page, + struct ubase_ctx_buf_cap *ctx_buf) +{ + int ret; + + ret = ummu_drain_pages(ctx_buf->slot, ctx_page->iova, + UBASE_IOVA_COMM_PFN_CNT); + if (ret) + ubase_err(udev, + "failed to drain pages in ummu, npage = %u, ret = %d.\n", + ctx_page->npage, ret); + + kfree(ctx_page); +} + +static int ubase_use_buf_ctx_page(struct ubase_dev *udev, + struct ubase_ctx_buf_cap *ctx_buf, u32 tag) +{ + u32 cnt_per_page_shift = ctx_buf->cnt_per_page_shift; + u32 npage = tag >> cnt_per_page_shift; + struct ubase_ctx_page *ctx_page; + int ret; + + mutex_lock(&ctx_buf->ctx_mutex); + + ctx_page = (struct ubase_ctx_page *)xa_load(&ctx_buf->ctx_xa, npage); + if (!ctx_page) { + ret = ubase_create_ctx_page(udev, ctx_buf, &ctx_page, npage); + if (ret) { + ubase_err(udev, "failed to create context page, ret = %d.\n", + ret); + goto err_create; + } + + ret = xa_err(xa_store(&ctx_buf->ctx_xa, npage, ctx_page, + GFP_KERNEL)); + if (ret) { + ubase_err(udev, "failed to store page, ret = %d.\n", + ret); + goto err_store; + } + } + + refcount_inc(&ctx_page->refcount); + mutex_unlock(&ctx_buf->ctx_mutex); + + return 0; +err_store: + ubase_destroy_ctx_page(udev, ctx_page, ctx_buf); +err_create: + mutex_unlock(&ctx_buf->ctx_mutex); + + return ret; +} + +static void ubase_free_buf_ctx_page(struct ubase_dev *udev, + struct ubase_ctx_buf_cap *ctx_buf, u32 tag) +{ + struct ubase_ctx_page *ctx_page; + u32 cnt_per_page_shift; + u32 npage; + + if (!ctx_buf) + return; + + cnt_per_page_shift = ctx_buf->cnt_per_page_shift; + npage = tag >> cnt_per_page_shift; + + mutex_lock(&ctx_buf->ctx_mutex); + + ctx_page = (struct ubase_ctx_page *)xa_load(&ctx_buf->ctx_xa, npage); + if (!ctx_page) { + ubase_err(udev, + "no find ctx page in free buf page, npage = %u.\n", + npage); + mutex_unlock(&ctx_buf->ctx_mutex); + return; + } + + refcount_dec(&ctx_page->refcount); + if (refcount_dec_if_one(&ctx_page->refcount)) { 
+ ubase_info(udev, + "refcout of ctx page is equal to one and the ctx_page is going to erased.\n"); + xa_erase(&ctx_buf->ctx_xa, npage); + ubase_destroy_ctx_page(udev, ctx_page, ctx_buf); + } + + mutex_unlock(&ctx_buf->ctx_mutex); +} + +static struct ubase_ctx_buf_cap* +ubase_parse_common_buf(struct ubase_mbx_attr *attr, + struct mbx_op_match op_matches[], + enum ubase_mb_type *type, u32 size) +{ + u32 i; + + for (i = 0; i < size; i++) { + if (attr->op == op_matches[i].op) { + *type = op_matches[i].type; + return op_matches[i].ctx_caps; + } + } + + return NULL; +} + +static bool ubase_is_jfs_opcode(u8 op) +{ + switch (op) { + case UBASE_MB_CREATE_JFS_CONTEXT: + case UBASE_MB_MODIFY_JFS_CONTEXT: + case UBASE_MB_DESTROY_JFS_CONTEXT: + case UBASE_MB_QUERY_JFS_CONTEXT: + return true; + default: + return false; + } +} + +static struct ubase_ctx_buf_cap* +ubase_parse_ta_opcode_buf(struct ubase_dev *udev, struct ubase_mbx_attr *attr, + enum ubase_mb_type *type) +{ + struct mbx_op_match ta_matches[] = { + {UBASE_MB_CREATE_JFS_CONTEXT, UBASE_MB_CREATE, &udev->ctx_buf.jfs}, + {UBASE_MB_MODIFY_JFS_CONTEXT, UBASE_MB_MODIFY, &udev->ctx_buf.jfs}, + {UBASE_MB_DESTROY_JFS_CONTEXT, UBASE_MB_DESTROY, &udev->ctx_buf.jfs}, + {UBASE_MB_QUERY_JFS_CONTEXT, UBASE_MB_QUERY, &udev->ctx_buf.jfs}, + {UBASE_MB_CREATE_JFC_CONTEXT, UBASE_MB_CREATE, &udev->ctx_buf.jfc}, + {UBASE_MB_MODIFY_JFC_CONTEXT, UBASE_MB_MODIFY, &udev->ctx_buf.jfc}, + {UBASE_MB_DESTROY_JFC_CONTEXT, UBASE_MB_DESTROY, &udev->ctx_buf.jfc}, + {UBASE_MB_QUERY_JFC_CONTEXT, UBASE_MB_QUERY, &udev->ctx_buf.jfc}, + {UBASE_MB_CREATE_JFR_CONTEXT, UBASE_MB_CREATE, &udev->ctx_buf.jfr}, + {UBASE_MB_MODIFY_JFR_CONTEXT, UBASE_MB_MODIFY, &udev->ctx_buf.jfr}, + {UBASE_MB_DESTROY_JFR_CONTEXT, UBASE_MB_DESTROY, &udev->ctx_buf.jfr}, + {UBASE_MB_QUERY_JFR_CONTEXT, UBASE_MB_QUERY, &udev->ctx_buf.jfr}, + {UBASE_MB_CREATE_JETTY_GROUP_CONTEXT, UBASE_MB_CREATE, &udev->ctx_buf.jtg}, + {UBASE_MB_MODIFY_JETTY_GROUP_CONTEXT, UBASE_MB_MODIFY, 
&udev->ctx_buf.jtg}, + {UBASE_MB_DESTROY_JETTY_GROUP_CONTEXT, UBASE_MB_DESTROY, &udev->ctx_buf.jtg}, + {UBASE_MB_QUERY_JETTY_GROUP_CONTEXT, UBASE_MB_QUERY, &udev->ctx_buf.jtg}, + {UBASE_MB_CREATE_RC_CONTEXT, UBASE_MB_CREATE, &udev->ctx_buf.rc}, + {UBASE_MB_MODIFY_RC_CONTEXT, UBASE_MB_MODIFY, &udev->ctx_buf.rc}, + {UBASE_MB_DESTROY_RC_CONTEXT, UBASE_MB_DESTROY, &udev->ctx_buf.rc}, + {UBASE_MB_QUERY_RC_CONTEXT, UBASE_MB_QUERY, &udev->ctx_buf.rc}, + }; + u32 size = ARRAY_SIZE(ta_matches); + + return ubase_parse_common_buf(attr, ta_matches, type, size); +} + +static struct ubase_ctx_buf_cap* +ubase_parse_opcode_buf(struct ubase_dev *udev, struct ubase_mbx_attr *attr, + enum ubase_mb_type *type) +{ + if (ubase_is_jfs_opcode(attr->op) && + test_bit(UBASE_STATE_PREALLOC_OK_B, &udev->state_bits)) + return NULL; + + return ubase_parse_ta_opcode_buf(udev, attr, type); +} + +static int ubase_check_buf_ctx_page(struct ubase_dev *udev, + struct ubase_ctx_buf_cap *ctx_buf, u32 tag) +{ + u32 cnt_per_page_shift = ctx_buf->cnt_per_page_shift; + u32 npage = tag >> cnt_per_page_shift; + struct ubase_ctx_page *ctx_page; + + mutex_lock(&ctx_buf->ctx_mutex); + + ctx_page = (struct ubase_ctx_page *)xa_load(&ctx_buf->ctx_xa, npage); + if (!ctx_page) { + ubase_err(udev, + "failed to find ctx page in free buf page, npage = %u.\n", + npage); + mutex_unlock(&ctx_buf->ctx_mutex); + return -EINVAL; + } + + mutex_unlock(&ctx_buf->ctx_mutex); + + return 0; +} + +static int ubase_hw_upgrade_ctx_event(struct ubase_dev *udev, + struct ubase_mbx_attr *attr, + struct ubase_cmd_mailbox *mailbox) +{ + int ret; + + down(&udev->mb_cmd.sem); + ret = ubase_cmd_mbox_event(udev, attr, mailbox); + up(&udev->mb_cmd.sem); + + return ret; +} + +int ubase_hw_upgrade_ctx_poll(struct ubase_dev *udev, + struct ubase_mbx_attr *attr, + struct ubase_cmd_mailbox *mailbox) +{ + int ret; + + down(&udev->mb_cmd.sem); + ret = ubase_cmd_mbox_poll(udev, attr, mailbox); + up(&udev->mb_cmd.sem); + + return ret; +} + +int 
__ubase_hw_upgrade_ctx(struct ubase_dev *udev, + struct ubase_mbx_attr *attr, + struct ubase_cmd_mailbox *mailbox) +{ + return ubase_hw_upgrade_ctx_event(udev, attr, mailbox); +} + +int __ubase_hw_upgrade_ctx_ex(struct ubase_dev *udev, + struct ubase_mbx_attr *attr, + struct ubase_cmd_mailbox *mailbox) +{ + enum ubase_mb_type type = UBASE_MB_OTHER; + struct ubase_ctx_buf_cap *ctx_buf; + int ret; + + ctx_buf = ubase_parse_opcode_buf(udev, attr, &type); + if (ctx_buf) { + if (type == UBASE_MB_CREATE) { + ret = ubase_use_buf_ctx_page(udev, ctx_buf, attr->tag); + if (ret) { + ubase_err(udev, "failed to write context va, ret = %d.\n", + ret); + return ret; + } + } else if (type == UBASE_MB_QUERY) { + ret = ubase_check_buf_ctx_page(udev, ctx_buf, attr->tag); + if (ret) { + ubase_info(udev, + "A query operation is performed before the create operation, 0 is returned by default, op = 0x%x.\n", + attr->op); + return 0; + } + } else if (type == UBASE_MB_MODIFY || type == UBASE_MB_DESTROY) { + ret = ubase_check_buf_ctx_page(udev, ctx_buf, attr->tag); + if (ret) { + ubase_info(udev, + "An access operation is performed before the create operation, op = 0x%x.\n", + attr->op); + return ret; + } + } + } + + ret = __ubase_hw_upgrade_ctx(udev, attr, mailbox); + if ((ret && type == UBASE_MB_CREATE) || + (!ret && type == UBASE_MB_DESTROY)) + ubase_free_buf_ctx_page(udev, ctx_buf, attr->tag); + + return ret; +} + +int ubase_hw_upgrade_ctx_ex(struct auxiliary_device *aux_dev, + struct ubase_mbx_attr *attr, + struct ubase_cmd_mailbox *mailbox) +{ + struct ubase_dev *udev; + + if (!aux_dev || !attr || !mailbox) + return -EINVAL; + + udev = ubase_get_udev_by_adev(aux_dev); + if (!test_bit(UBASE_STATE_CTX_READY_B, &udev->state_bits)) + return -EAGAIN; + + return __ubase_hw_upgrade_ctx_ex(udev, attr, mailbox); +} +EXPORT_SYMBOL(ubase_hw_upgrade_ctx_ex); diff --git a/drivers/ub/ubase/ubase_mailbox.h b/drivers/ub/ubase/ubase_mailbox.h new file mode 100644 index 000000000000..cfc259c301c9 --- 
/dev/null +++ b/drivers/ub/ubase/ubase_mailbox.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#ifndef __UBASE_MAILBOX_H__ +#define __UBASE_MAILBOX_H__ + +#include "ubase_comm_mbx.h" +#include "ubase_dev.h" + +#define CMD_MBX_POLL_VALUE 0xffff +#define UBASE_MAILBOX_SIZE 4096 +#define UBASE_MBX_TX_TIMEOUT 30000 + +enum ubase_mb_type { + UBASE_MB_CREATE, + UBASE_MB_MODIFY, + UBASE_MB_DESTROY, + UBASE_MB_QUERY, + UBASE_MB_OTHER, +}; + +struct mbx_op_match { + u32 op; + enum ubase_mb_type type; + struct ubase_ctx_buf_cap *ctx_caps; +}; + +int ubase_mbox_cmd_init(struct ubase_dev *udev); +void ubase_mbox_cmd_uninit(struct ubase_dev *udev); +struct ubase_cmd_mailbox *__ubase_alloc_cmd_mailbox(struct ubase_dev *udev); +void __ubase_free_cmd_mailbox(struct ubase_dev *udev, + struct ubase_cmd_mailbox *mailbox); +void ubase_destroy_ctx_page(struct ubase_dev *udev, + struct ubase_ctx_page *ctx_page, + struct ubase_ctx_buf_cap *ctx_buf); +int ubase_hw_upgrade_ctx_poll(struct ubase_dev *udev, + struct ubase_mbx_attr *attr, + struct ubase_cmd_mailbox *mailbox); +int __ubase_hw_upgrade_ctx(struct ubase_dev *udev, + struct ubase_mbx_attr *attr, + struct ubase_cmd_mailbox *mailbox); +int __ubase_hw_upgrade_ctx_ex(struct ubase_dev *udev, + struct ubase_mbx_attr *attr, + struct ubase_cmd_mailbox *mailbox); +int ubase_create_ctx_page(struct ubase_dev *udev, + struct ubase_ctx_buf_cap *ctx_buf, + struct ubase_ctx_page **ctx_page, u32 npage); + +#endif diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h index e7089e59d1bf..0514dff9f2b8 100644 --- a/include/ub/ubase/ubase_comm_cmd.h +++ b/include/ub/ubase/ubase_comm_cmd.h @@ -10,12 +10,97 @@ #include #include +#define UBASE_FW_VERSION_BYTE3_MASK GENMASK(31, 24) +#define UBASE_FW_VERSION_BYTE2_MASK GENMASK(23, 16) +#define UBASE_FW_VERSION_BYTE1_MASK GENMASK(15, 8) +#define UBASE_FW_VERSION_BYTE0_MASK 
GENMASK(7, 0) + +#define UBASE_CSQ_BASEADDR_L_REG 0x18400 +#define UBASE_CSQ_BASEADDR_H_REG 0x18404 +#define UBASE_CSQ_DEPTH_REG 0x18408 +#define UBASE_CSQ_TAIL_REG 0x18410 +#define UBASE_CSQ_HEAD_REG 0x18414 +#define UBASE_CRQ_BASEADDR_L_REG 0x18418 +#define UBASE_CRQ_BASEADDR_H_REG 0x1841c +#define UBASE_CRQ_DEPTH_REG 0x18420 +#define UBASE_CRQ_TAIL_REG 0x18424 +#define UBASE_CRQ_HEAD_REG 0x18428 + +enum ubase_opcode_type { + /* Generic commands */ + UBASE_OPC_QUERY_FW_VER = 0x0001, + + /* Mailbox commands */ + UBASE_OPC_POST_MB = 0x7000, + UBASE_OPC_QUERY_MB_ST = 0X7001, + + /* Software commands */ + UBASE_OPC_MUE_TO_UE = 0xF001, + UBASE_OPC_UE_TO_MUE = 0xF002, +}; + +union ubase_mbox { + struct { + /* MB 0 */ + __le32 in_param_l; + /* MB 1 */ + __le32 in_param_h; + /* MB 2 */ + __le32 cmd : 8; + __le32 tag : 24; + /* MB 3 */ + __le32 seq_num : 16; + __le32 event_en : 1; + __le32 mbx_ue_id : 8; + __le32 rsv : 7; + /* MB 4 */ + __le32 status : 1; + __le32 hw_run : 1; + __le32 rsv1 : 30; + }; + + struct { + __le32 query_status : 1; + __le32 query_hw_run : 1; + __le32 query_rsv : 30; + }; +}; + +struct ubase_cmd_buf { + u16 opcode; + bool is_read; + u32 data_size; + void *data; +}; + struct ubase_crq_event_nb { u16 opcode; void *back; int (*crq_handler)(void *dev, void *data, u32 len); }; +static inline void ubase_fill_inout_buf(struct ubase_cmd_buf *buf, u16 opcode, + bool is_read, u32 data_size, void *data) +{ + buf->opcode = opcode; + buf->is_read = is_read; + buf->data_size = data_size; + buf->data = data; +} + +int ubase_cmd_send_inout(struct auxiliary_device *aux_dev, + struct ubase_cmd_buf *in, struct ubase_cmd_buf *out); +int ubase_cmd_send_in(struct auxiliary_device *aux_dev, + struct ubase_cmd_buf *in); +int ubase_cmd_send_inout_ex(struct auxiliary_device *aux_dev, + struct ubase_cmd_buf *in, struct ubase_cmd_buf *out, + u32 time_out); +int ubase_cmd_send_in_ex(struct auxiliary_device *aux_dev, + struct ubase_cmd_buf *in, u32 time_out); + +int 
ubase_cmd_get_data_size(struct auxiliary_device *aux_dev, u16 opcode, + u16 *data_size); + int ubase_register_crq_event(struct auxiliary_device *aux_dev, struct ubase_crq_event_nb *nb); void ubase_unregister_crq_event(struct auxiliary_device *aux_dev, u16 opcode); diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index e1d6a3d32d6c..c07100c59a9f 100644 --- a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -15,6 +15,7 @@ struct iova_slot; #define UBASE_ADEV_NAME "ubase" +#define UBASE_IOVA_COMM_PFN_CNT 1 #define UBASE_MAX_DSCP (64) #define UBASE_MAX_SL_NUM (16U) #define UBASE_MAX_VL_NUM (16U) diff --git a/include/ub/ubase/ubase_comm_eq.h b/include/ub/ubase/ubase_comm_eq.h index 2dabcc0537ed..8e355b710025 100644 --- a/include/ub/ubase/ubase_comm_eq.h +++ b/include/ub/ubase/ubase_comm_eq.h @@ -40,6 +40,37 @@ struct ubase_event_nb { void *back; }; +struct ubase_aeqe { + u32 event_type : 8; + u32 sub_type : 8; + u32 rsv0 : 15; + u32 owner : 1; + + union { + struct { + u32 num; + u32 rsv0; + u32 rsv1; + } queue_event; + +#pragma pack(push, 1) + struct { + u64 out_param; + u16 seq_num; + u8 status; + u8 rsv0; + } cmd; +#pragma pack(pop) + } event; + u32 rsv[12]; +}; + +struct ubase_aeq_notify_info { + u8 event_type; + u8 sub_type; + struct ubase_aeqe *aeqe; +}; + int ubase_event_register(struct auxiliary_device *adev, struct ubase_event_nb *cb); void ubase_event_unregister(struct auxiliary_device *adev, diff --git a/include/ub/ubase/ubase_comm_mbx.h b/include/ub/ubase/ubase_comm_mbx.h new file mode 100644 index 000000000000..26c625f80a77 --- /dev/null +++ b/include/ub/ubase/ubase_comm_mbx.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#ifndef _UB_UBASE_COMM_MBX_H_ +#define _UB_UBASE_COMM_MBX_H_ + +#include +#include + +struct ubase_cmd_mailbox { + void *buf; + dma_addr_t dma; +}; + +struct ubase_mbx_attr { + __le32 tag : 24; + __le32 rsv : 8; + u8 op; + u8 mbx_ue_id; +}; + +enum ubase_mbox_opcode { + /* write/destroy jfs/jetty ctx buf */ + UBASE_MB_WRITE_JFS_CONTEXT_VA = 0x0, + UBASE_MB_CREATE_JFS_CONTEXT = 0x4, + UBASE_MB_MODIFY_JFS_CONTEXT = 0x5, + UBASE_MB_QUERY_JFS_CONTEXT = 0x6, + UBASE_MB_DESTROY_JFS_CONTEXT = 0x7, + + /* write/destroy rc ctx buf */ + UBASE_MB_WRITE_RC_CONTEXT_VA = 0x10, + UBASE_MB_CREATE_RC_CONTEXT = 0x14, + UBASE_MB_MODIFY_RC_CONTEXT = 0x15, + UBASE_MB_QUERY_RC_CONTEXT = 0x16, + UBASE_MB_DESTROY_RC_CONTEXT = 0x17, + + /* write/destroy jfc ctx buf */ + UBASE_MB_WRITE_JFC_CONTEXT_VA = 0x20, + UBASE_MB_CREATE_JFC_CONTEXT = 0x24, + UBASE_MB_MODIFY_JFC_CONTEXT = 0x25, + UBASE_MB_QUERY_JFC_CONTEXT = 0x26, + UBASE_MB_DESTROY_JFC_CONTEXT = 0x27, + + /* create/destroy aeq ctx */ + UBASE_MB_CREATE_AEQ_CONTEXT = 0x34, + UBASE_MB_QUERY_AEQ_CONTEXT = 0x36, + UBASE_MB_DESTROY_AEQ_CONTEXT = 0x37, + + /* create/destroy ceq ctx */ + UBASE_MB_CREATE_CEQ_CONTEXT = 0x44, + UBASE_MB_QUERY_CEQ_CONTEXT = 0x46, + UBASE_MB_DESTROY_CEQ_CONTEXT = 0x47, + + /* write/destroy jfr ctx buf */ + UBASE_MB_WRITE_JFR_CONTEXT_VA = 0x50, + UBASE_MB_CREATE_JFR_CONTEXT = 0x54, + UBASE_MB_MODIFY_JFR_CONTEXT = 0x55, + UBASE_MB_QUERY_JFR_CONTEXT = 0x56, + UBASE_MB_DESTROY_JFR_CONTEXT = 0x57, + + /* write/destroy jetty group ctx buf */ + UBASE_MB_WRITE_JETTY_GROUP_CONTEXT_VA = 0x60, + UBASE_MB_CREATE_JETTY_GROUP_CONTEXT = 0x64, + UBASE_MB_MODIFY_JETTY_GROUP_CONTEXT = 0x65, + UBASE_MB_QUERY_JETTY_GROUP_CONTEXT = 0x66, + UBASE_MB_DESTROY_JETTY_GROUP_CONTEXT = 0x67, + + /* query tpg ctx buf */ + UBASE_MB_QUERY_TPG_CONTEXT = 0x76, + + /* query tp ctx buf */ + UBASE_MB_QUERY_TP_CONTEXT = 0x86, +}; + +struct ubase_cmd_mailbox *ubase_alloc_cmd_mailbox(struct auxiliary_device *aux_dev); +void 
ubase_free_cmd_mailbox(struct auxiliary_device *aux_dev, + struct ubase_cmd_mailbox *mailbox); + +int ubase_hw_upgrade_ctx_ex(struct auxiliary_device *aux_dev, + struct ubase_mbx_attr *attr, + struct ubase_cmd_mailbox *mailbox); + +static inline void ubase_fill_mbx_attr(struct ubase_mbx_attr *attr, u32 tag, + u8 op, u8 mbx_ue_id) +{ + attr->tag = tag; + attr->op = op; + attr->mbx_ue_id = mbx_ue_id; +} + +#endif -- Gitee From a152802795763d0e27808bf239f5803c048e2802 Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Fri, 12 Sep 2025 17:11:27 +0800 Subject: [PATCH 03/10] ub: ubase: support debugfs public interface. drivers inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID41MP CVE: NA ------------------------------------------------------------------ UBASE provides a debugfs framework for auxiliary devices. Auxiliary drivers (such as unic and cdma) use the debugfs interfaces provided by UBASE to create their own debugfs files. This ensures that the hierarchy of, and dependencies between, debugfs entries are maintained. This patch adds the debugfs export interface for UBASE.
Signed-off-by: Jianqiang Li Signed-off-by: Fengyan Mu Signed-off-by: Haibin Lu Signed-off-by: Xiongchuan Zhou --- drivers/ub/ubase/Makefile | 3 +- drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c | 31 ++++++++ drivers/ub/ubase/debugfs/ubase_debugfs.c | 75 +++++++++++++++++++- drivers/ub/ubase/ubase_mailbox.h | 3 +- include/ub/ubase/ubase_comm_debugfs.h | 7 ++ 5 files changed, 114 insertions(+), 5 deletions(-) create mode 100644 drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c diff --git a/drivers/ub/ubase/Makefile b/drivers/ub/ubase/Makefile index 10adf2df0859..59032bf09ddf 100644 --- a/drivers/ub/ubase/Makefile +++ b/drivers/ub/ubase/Makefile @@ -10,7 +10,8 @@ ccflags-y += -I$(srctree)/drivers/ub/ubase/debugfs MODULE_NAME := ubase UBASE_OBJS := ubase_main.o ubase_dev.o ubase_cmd.o \ - debugfs/ubase_debugfs.o ubase_eq.o ubase_mailbox.o ubase_ubus.o + debugfs/ubase_debugfs.o ubase_eq.o ubase_mailbox.o ubase_ubus.o \ + debugfs/ubase_ctx_debugfs.o $(MODULE_NAME)-objs := $(UBASE_OBJS) obj-$(CONFIG_UB_UBASE) := ubase.o diff --git a/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c new file mode 100644 index 000000000000..1004033c1581 --- /dev/null +++ b/drivers/ub/ubase/debugfs/ubase_ctx_debugfs.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. 
+ * + */ + +#include + +#include "ubase_debugfs.h" + +static void __ubase_print_context_hw(struct seq_file *s, void *ctx_addr, + u32 ctx_len) +{ + __le32 *p = (__le32 *)ctx_addr; + u32 i; + + ctx_len = ctx_len / sizeof(u32); + for (i = 0; i < ctx_len; i++, p++) { + seq_printf(s, "%lu\t", (i + 1) * sizeof(u32)); + seq_printf(s, "%08x\n", le32_to_cpu(*p)); + } +} + +void ubase_print_context_hw(struct seq_file *s, void *ctx_addr, u32 ctx_len) +{ + if (!s || !ctx_addr) + return; + + __ubase_print_context_hw(s, ctx_addr, ctx_len); +} +EXPORT_SYMBOL(ubase_print_context_hw); diff --git a/drivers/ub/ubase/debugfs/ubase_debugfs.c b/drivers/ub/ubase/debugfs/ubase_debugfs.c index 75bf029f17a5..46a794d935da 100644 --- a/drivers/ub/ubase/debugfs/ubase_debugfs.c +++ b/drivers/ub/ubase/debugfs/ubase_debugfs.c @@ -29,6 +29,44 @@ static bool __ubase_dbg_dentry_support(struct device *dev, u32 property) return false; } +bool ubase_dbg_dentry_support(struct auxiliary_device *adev, u32 property) +{ + if (!adev) + return false; + + return __ubase_dbg_dentry_support(__ubase_get_udev_by_adev(adev)->dev, + property); +} +EXPORT_SYMBOL(ubase_dbg_dentry_support); + +static int __ubase_dbg_seq_file_init(struct device *dev, + struct ubase_dbg_dentry_info *dirs, + struct ubase_dbgfs *dbgfs, u32 idx) +{ + struct ubase_dbg_cmd_info *cmd_info = &dbgfs->cmd_info[idx]; + struct dentry *cur_dir; + + cur_dir = dirs[cmd_info->dentry_index].dentry; + if (!cmd_info->read_func) + return -EFAULT; + + debugfs_create_devm_seqfile(dev, cmd_info->name, cur_dir, + cmd_info->read_func); + + return 0; +} + +int ubase_dbg_seq_file_init(struct device *dev, + struct ubase_dbg_dentry_info *dirs, + struct ubase_dbgfs *dbgfs, u32 idx) +{ + if (!dev || !dirs || !dbgfs || !dbgfs->cmd_info) + return -EINVAL; + + return __ubase_dbg_seq_file_init(dev, dirs, dbgfs, idx); +} +EXPORT_SYMBOL(ubase_dbg_seq_file_init); + static struct ubase_dbg_dentry_info ubase_dbg_dentry[] = { /* ue debugfs top-level directory, * "dev_name" 
refers to the ue name @@ -112,6 +150,7 @@ EXPORT_SYMBOL(ubase_dbg_create_dentry); int ubase_dbg_init(struct ubase_dev *udev) { + struct ubase_dbg_dentry_info dentry[UBASE_DBG_DENTRY_ROOT + 1] = {0}; const char *name = dev_name(udev->dev); struct device *dev = udev->dev; int ret; @@ -122,12 +161,13 @@ int ubase_dbg_init(struct ubase_dev *udev) return PTR_ERR(udev->dbgfs.dentry); } - ubase_dbg_dentry[UBASE_DBG_DENTRY_ROOT].dentry = udev->dbgfs.dentry; + memcpy(dentry, ubase_dbg_dentry, sizeof(dentry)); + dentry[UBASE_DBG_DENTRY_ROOT].dentry = udev->dbgfs.dentry; udev->dbgfs.cmd_info = ubase_dbg_cmd; udev->dbgfs.cmd_info_size = ARRAY_SIZE(ubase_dbg_cmd); - ret = ubase_dbg_create_dentry(dev, &udev->dbgfs, ubase_dbg_dentry, - ARRAY_SIZE(ubase_dbg_dentry) - 1); + ret = ubase_dbg_create_dentry(dev, &udev->dbgfs, dentry, + ARRAY_SIZE(dentry) - 1); if (ret) { ubase_err(udev, "failed to create ubase debugfs dentry, ret = %d.\n", @@ -163,3 +203,32 @@ void ubase_dbg_unregister_debugfs(void) { debugfs_remove_recursive(ubase_dbgfs_root); } + +struct dentry *ubase_diag_debugfs_root(struct auxiliary_device *adev) +{ + if (!adev) + return NULL; + + return __ubase_get_udev_by_adev(adev)->dbgfs.dentry; +} +EXPORT_SYMBOL(ubase_diag_debugfs_root); + +int ubase_dbg_format_time(time64_t time, struct seq_file *s) +{ +#define YEAR_OFFSET 1900 + const char week[7][4] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}; + const char mouth[12][4] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}; + struct tm t; + + if (!s) + return -EINVAL; + + time64_to_tm(time, 0, &t); + + seq_printf(s, "%s %s %02d %02d:%02d:%02d %ld", week[t.tm_wday], + mouth[t.tm_mon], t.tm_mday, t.tm_hour, t.tm_min, + t.tm_sec, t.tm_year + YEAR_OFFSET); + return 0; +} +EXPORT_SYMBOL(ubase_dbg_format_time); diff --git a/drivers/ub/ubase/ubase_mailbox.h b/drivers/ub/ubase/ubase_mailbox.h index cfc259c301c9..33e72b7d15b3 100644 --- a/drivers/ub/ubase/ubase_mailbox.h +++ 
b/drivers/ub/ubase/ubase_mailbox.h @@ -7,7 +7,8 @@ #ifndef __UBASE_MAILBOX_H__ #define __UBASE_MAILBOX_H__ -#include "ubase_comm_mbx.h" +#include + #include "ubase_dev.h" #define CMD_MBX_POLL_VALUE 0xffff diff --git a/include/ub/ubase/ubase_comm_debugfs.h b/include/ub/ubase/ubase_comm_debugfs.h index c3d9c473d646..dc0bd30aa93b 100644 --- a/include/ub/ubase/ubase_comm_debugfs.h +++ b/include/ub/ubase/ubase_comm_debugfs.h @@ -34,7 +34,14 @@ struct ubase_dbgfs { int cmd_info_size; }; +int ubase_dbg_seq_file_init(struct device *dev, + struct ubase_dbg_dentry_info *dirs, + struct ubase_dbgfs *dbgfs, u32 idx); int ubase_dbg_create_dentry(struct device *dev, struct ubase_dbgfs *dbgfs, struct ubase_dbg_dentry_info *dirs, u32 root_idx); +struct dentry *ubase_diag_debugfs_root(struct auxiliary_device *adev); +void ubase_print_context_hw(struct seq_file *s, void *ctx_addr, u32 ctx_len); +bool ubase_dbg_dentry_support(struct auxiliary_device *adev, u32 property); +int ubase_dbg_format_time(time64_t time, struct seq_file *s); #endif -- Gitee From 2e855fc37dac44e0c535dfe828e82ed8fe1bcf6f Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Fri, 12 Sep 2025 17:46:15 +0800 Subject: [PATCH 04/10] ub: ubase: add the function that queries the ueid drivers inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID41MP CVE: NA ------------------------------------------------------- This patch adds support in the UBASE driver for querying the bus eid for all entities. It also supplies the interface to get the ueid for the udma driver, an upper-layer driver that needs this hardware information.
Signed-off-by: Wenlong Zhu Signed-off-by: Fengyan Mu Signed-off-by: Xiongchuan Zhou --- drivers/ub/ubase/ubase_cmd.h | 5 ++++ drivers/ub/ubase/ubase_dev.c | 40 +++++++++++++++++++++++++++++++ include/ub/ubase/ubase_comm_cmd.h | 1 + include/ub/ubase/ubase_comm_dev.h | 2 ++ 4 files changed, 48 insertions(+) diff --git a/drivers/ub/ubase/ubase_cmd.h b/drivers/ub/ubase/ubase_cmd.h index 70c3f4d00719..7acaed31ee37 100644 --- a/drivers/ub/ubase/ubase_cmd.h +++ b/drivers/ub/ubase/ubase_cmd.h @@ -49,6 +49,11 @@ struct ubase_query_version_cmd { __le32 caps[UBASE_CAP_LEN]; }; +struct ubase_query_ueid_cmd { + __le32 ueid[UBASE_BUS_EID_LEN]; + u32 rsv[2]; +}; + static inline void __ubase_fill_inout_buf(struct ubase_cmd_buf *buf, u16 opcode, bool is_read, u32 data_size, void *data) { diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index 8c102000e7ef..e3d770c9225e 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -655,3 +655,43 @@ bool ubase_dbg_default(void) { return ubase_debug; } + +static int ubase_query_bus_eid(struct ubase_dev *udev, struct ubase_bus_eid *eid) +{ + struct ubase_query_ueid_cmd resp = {0}; + struct ubase_cmd_buf in, out; + int i, ret; + + __ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_BUS_EID, true, 0, NULL); + __ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_BUS_EID, false, + sizeof(resp), &resp); + + ret = __ubase_cmd_send_inout(udev, &in, &out); + if (ret) { + ubase_err(udev, "failed to query bus eid, ret = %d.\n", ret); + return ret; + } + + for (i = 0; i < UBASE_BUS_EID_LEN; ++i) + eid->eid[i] = le32_to_cpu(resp.ueid[i]); + + return 0; +} + +static int __ubase_get_bus_eid(struct ubase_dev *udev, struct ubase_bus_eid *eid) +{ + return ubase_query_bus_eid(udev, eid); +} + +int ubase_get_bus_eid(struct auxiliary_device *adev, struct ubase_bus_eid *eid) +{ + struct ubase_dev *udev; + + if (!adev || !eid) + return -EINVAL; + + udev = __ubase_get_udev_by_adev(adev); + + return __ubase_get_bus_eid(udev, eid); +} 
+EXPORT_SYMBOL(ubase_get_bus_eid); diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h index 0514dff9f2b8..9d5c352999ec 100644 --- a/include/ub/ubase/ubase_comm_cmd.h +++ b/include/ub/ubase/ubase_comm_cmd.h @@ -29,6 +29,7 @@ enum ubase_opcode_type { /* Generic commands */ UBASE_OPC_QUERY_FW_VER = 0x0001, + UBASE_OPC_QUERY_BUS_EID = 0x0047, /* Mailbox commands */ UBASE_OPC_POST_MB = 0x7000, diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index c07100c59a9f..2060a17e400c 100644 --- a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -222,4 +222,6 @@ struct ubase_adev_caps *ubase_get_unic_caps(struct auxiliary_device *adev); struct ubase_adev_caps *ubase_get_udma_caps(struct auxiliary_device *adev); struct ubase_adev_caps *ubase_get_cdma_caps(struct auxiliary_device *adev); +int ubase_get_bus_eid(struct auxiliary_device *adev, struct ubase_bus_eid *eid); + #endif -- Gitee From 59e37a82209a0d0fd47e0e9a46a7984424d38fb4 Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Fri, 12 Sep 2025 19:00:58 +0800 Subject: [PATCH 05/10] ub: ubase: add function to query device resources drivers inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID41MP CVE: NA ------------------------------------------------------- This patch adds the function to query device resources, which include not only the resources needed by ubase but also those necessary for the unic, udma and cdma drivers. Based on this, ubase as the foundation driver can perform resource management, covering the TA/TP context buf, reserved jetty and so on. The ubase driver can get resource counts such as the jetty number, jetty depth, and the tp layer resources' count and depth from firmware through the designated cmdq operation code.
Signed-off-by: Fengyan Mu Signed-off-by: Xiongchuan Zhou --- drivers/ub/ubase/Makefile | 2 +- drivers/ub/ubase/ubase_dev.c | 7 +- drivers/ub/ubase/ubase_hw.c | 269 ++++++++++++++++++++++++++++++ drivers/ub/ubase/ubase_hw.h | 137 +++++++++++++++ include/ub/ubase/ubase_comm_cmd.h | 5 + include/ub/ubase/ubase_comm_dev.h | 1 + 6 files changed, 417 insertions(+), 4 deletions(-) create mode 100644 drivers/ub/ubase/ubase_hw.c create mode 100644 drivers/ub/ubase/ubase_hw.h diff --git a/drivers/ub/ubase/Makefile b/drivers/ub/ubase/Makefile index 59032bf09ddf..64169ef5d796 100644 --- a/drivers/ub/ubase/Makefile +++ b/drivers/ub/ubase/Makefile @@ -9,7 +9,7 @@ ccflags-y += -I$(srctree)/drivers/ub/ubase/debugfs MODULE_NAME := ubase -UBASE_OBJS := ubase_main.o ubase_dev.o ubase_cmd.o \ +UBASE_OBJS := ubase_main.o ubase_dev.o ubase_hw.o ubase_cmd.o \ debugfs/ubase_debugfs.o ubase_eq.o ubase_mailbox.o ubase_ubus.o \ debugfs/ubase_ctx_debugfs.o diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index e3d770c9225e..e87a819cdd07 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -9,6 +9,7 @@ #include "debugfs/ubase_debugfs.h" #include "ubase_cmd.h" +#include "ubase_hw.h" #include "ubase_mailbox.h" #include "ubase_dev.h" @@ -440,7 +441,7 @@ static const struct ubase_init_function ubase_init_func_map[] = { }, { "query dev res", UBASE_SUP_ALL, 0, - NULL, NULL + ubase_query_dev_res, NULL }, { "init mailbox", UBASE_SUP_NO_PMU, 0, @@ -448,11 +449,11 @@ static const struct ubase_init_function ubase_init_func_map[] = { }, { "query chip info", UBASE_SUP_ALL, 0, - NULL, NULL + ubase_query_chip_info, NULL }, { "query controller_info", UBASE_SUP_NO_PMU, 0, - NULL, NULL + ubase_query_controller_info, NULL }, { "query hw oor caps", UBASE_SUP_NO_PMU, 0, diff --git a/drivers/ub/ubase/ubase_hw.c b/drivers/ub/ubase/ubase_hw.c new file mode 100644 index 000000000000..11e32da647dc --- /dev/null +++ b/drivers/ub/ubase/ubase_hw.c @@ -0,0 +1,269 @@ +// 
SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#include "ubase_cmd.h" +#include "ubase_hw.h" + +struct ubase_dma_buf_desc { + struct ubase_dma_buf *buf; + u16 opc; + bool (*is_supported)(struct ubase_dev *dev); +}; + +#define UBASE_DEFINE_DMA_BUFS(udev) \ + struct ubase_dma_buf_desc bufs[] = { \ + } + +static void ubase_assign_addr_val(void *addr, u32 val, u8 size) +{ +#define VAL_SIZE_1 1 +#define VAL_SIZE_2 2 +#define VAL_SIZE_4 4 + + switch (size) { + case VAL_SIZE_1: + *(u8 *)addr = (u8)val; + break; + case VAL_SIZE_2: + *(u16 *)addr = (u16)val; + break; + case VAL_SIZE_4: + *(u32 *)addr = (u32)val; + break; + default: + break; + } +} + +static void ubase_check_dev_caps_comm(struct ubase_dev *udev) +{ + struct ubase_caps *caps = &udev->caps.dev_caps; + struct ubase_caps_item items[] = { + { + &caps->num_ceq_vectors, UBASE_DEF_CEQ_VECTOR_NUM, + sizeof(caps->num_ceq_vectors), "ceq vector num" + }, + { + &caps->num_aeq_vectors, UBASE_DEF_AEQ_VECTOR_NUM, + sizeof(caps->num_aeq_vectors), "aeq vector num" + }, + { + &caps->num_misc_vectors, UBASE_DEF_MISC_VERCTOR_NUM, + sizeof(caps->num_misc_vectors), "misc vector num" + }, + { + &caps->public_jetty_cnt, UBASE_DEF_PUBLIC_JETTY_CNT, + sizeof(caps->public_jetty_cnt), "public jetty count" + }, + { + &caps->aeqe_size, UBASE_DEF_EQE_SIZE, + sizeof(caps->aeqe_size), "aeqe size" + }, + { + &caps->ceqe_size, UBASE_DEF_EQE_SIZE, + sizeof(caps->ceqe_size), "ceqe size" + }, + { + &caps->aeqe_depth, UBASE_DEF_AEQ_DEPTH, + sizeof(caps->aeqe_depth), "aeqe depth" + }, + { + &caps->ceqe_depth, UBASE_DEF_CEQ_DEPTH, + sizeof(caps->ceqe_depth), "ceqe depth" + }, + }; + u32 i, items_size = ARRAY_SIZE(items); + u32 val; + + for (i = 0; i < items_size; i++) { + val = 0; + memcpy(&val, items[i].p, items[i].size); + if (!val) { + ubase_assign_addr_val(items[i].p, items[i].default_val, + items[i].size); + ubase_warn(udev, "using default %s(%u).\n", + 
items[i].name, items[i].default_val); + } + } +} + +static int ubase_check_dev_caps_extdb(struct ubase_dev *udev) +{ + UBASE_DEFINE_DMA_BUFS(udev); + int i; + + for (i = 0; i < ARRAY_SIZE(bufs); i++) { + if (bufs[i].is_supported(udev) && !bufs[i].buf->size) { + ubase_err(udev, + "failed to check caps: buf[%d] size=0.\n", i); + return -EINVAL; + } + } + + return 0; +} + +static int ubase_check_dev_caps(struct ubase_dev *udev) +{ + ubase_check_dev_caps_comm(udev); + + return ubase_check_dev_caps_extdb(udev); +} + +static void ubase_parse_dev_caps_comm(struct ubase_dev *udev, + const struct ubase_res_cmd_resp *resp) +{ + struct ubase_caps *dev_caps = &udev->caps.dev_caps; + + dev_caps->num_ceq_vectors = le16_to_cpu(resp->ceq_vector_num); + dev_caps->num_aeq_vectors = le16_to_cpu(resp->aeq_vector_num); + dev_caps->num_misc_vectors = le16_to_cpu(resp->misc_vector_num); + dev_caps->aeqe_size = le16_to_cpu(resp->aeqe_size); + dev_caps->ceqe_size = le16_to_cpu(resp->ceqe_size); + dev_caps->aeqe_depth = le32_to_cpu(resp->aeqe_depth); + dev_caps->ceqe_depth = le32_to_cpu(resp->ceqe_depth); + dev_caps->total_ue_num = le32_to_cpu(resp->total_ue_num); + dev_caps->public_jetty_cnt = le32_to_cpu(resp->public_jetty_cnt); + dev_caps->rsvd_jetty_cnt = le16_to_cpu(resp->rsvd_jetty_cnt); + dev_caps->ue_num = resp->ue_num; + dev_caps->mac_stats_num = le16_to_cpu(resp->mac_stats_num); + + udev->ta_ctx.extdb_buf.size = le32_to_cpu(resp->ta_extdb_buf_size); + udev->ta_ctx.timer_buf.size = le32_to_cpu(resp->ta_timer_buf_size); +} + +static void ubase_parse_dev_caps_unic(struct ubase_dev *udev, + const struct ubase_res_cmd_resp *resp) +{ + struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; + + unic_caps->jfs.max_cnt = le32_to_cpu(resp->nic_jfs_max_cnt); + unic_caps->jfs.reserved_cnt = le32_to_cpu(resp->nic_jfs_reserved_cnt); + unic_caps->jfs.depth = le32_to_cpu(resp->nic_jfs_depth); + unic_caps->jfr.max_cnt = le32_to_cpu(resp->nic_jfr_max_cnt); + unic_caps->jfr.reserved_cnt = 
le32_to_cpu(resp->nic_jfr_reserved_cnt); + unic_caps->jfr.depth = le32_to_cpu(resp->nic_jfr_depth); + unic_caps->jfc.max_cnt = le32_to_cpu(resp->nic_jfc_max_cnt); + unic_caps->jfc.reserved_cnt = le32_to_cpu(resp->nic_jfc_reserved_cnt); + unic_caps->jfc.depth = le32_to_cpu(resp->nic_jfc_depth); + unic_caps->cqe_size = le16_to_cpu(resp->nic_cqe_size); + unic_caps->utp_port_bitmap = le32_to_cpu(resp->port_bitmap); +} + +static void ubase_parse_dev_caps_udma(struct ubase_dev *udev, + const struct ubase_res_cmd_resp *resp) +{ + struct ubase_adev_caps *udma_caps = &udev->caps.udma_caps; + + udma_caps->jfs.max_cnt = le32_to_cpu(resp->udma_jfs_max_cnt); + udma_caps->jfs.reserved_cnt = le32_to_cpu(resp->udma_jfs_reserved_cnt); + udma_caps->jfs.depth = le32_to_cpu(resp->udma_jfs_depth); + udma_caps->jfr.max_cnt = le32_to_cpu(resp->udma_jfr_max_cnt); + udma_caps->jfr.reserved_cnt = le32_to_cpu(resp->udma_jfr_reserved_cnt); + udma_caps->jfr.depth = le32_to_cpu(resp->udma_jfr_depth); + udma_caps->jfc.max_cnt = le32_to_cpu(resp->udma_jfc_max_cnt); + udma_caps->jfc.reserved_cnt = le32_to_cpu(resp->udma_jfc_reserved_cnt); + udma_caps->jfc.depth = le32_to_cpu(resp->udma_jfc_depth); + udma_caps->cqe_size = le16_to_cpu(resp->udma_cqe_size); + udma_caps->jtg_max_cnt = le32_to_cpu(resp->jtg_max_cnt); + udma_caps->rc_max_cnt = le32_to_cpu(resp->rc_max_cnt_per_vl); + udma_caps->rc_que_depth = le32_to_cpu(resp->udma_rc_depth); +} + +static void ubase_parse_dev_caps(struct ubase_dev *udev, + const struct ubase_res_cmd_resp *resp) +{ + int i; + + for (i = 0; i < UBASE_CAP_LEN; i++) + udev->cap_bits[i] = le32_to_cpu(resp->cap_bits[i]); + + ubase_parse_dev_caps_comm(udev, resp); + ubase_parse_dev_caps_unic(udev, resp); + ubase_parse_dev_caps_udma(udev, resp); +} + +static int ubase_parse_dev_res(struct ubase_dev *udev, + struct ubase_res_cmd_resp *resp) +{ + ubase_parse_dev_caps(udev, resp); + return ubase_check_dev_caps(udev); +} + +int ubase_query_dev_res(struct ubase_dev *udev) +{ + struct 
ubase_res_cmd_resp resp = {0}; + struct ubase_cmd_buf out; + struct ubase_cmd_buf in; + int ret; + + __ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_COMM_RSRC_PARAM, true, 0, + NULL); + + __ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_COMM_RSRC_PARAM, false, + sizeof(resp), &resp); + + ret = __ubase_cmd_send_inout(udev, &in, &out); + if (ret) { + ubase_err(udev, "failed to query ubase res, ret = %d.\n", ret); + return ret; + } + + return ubase_parse_dev_res(udev, &resp); +} + +int ubase_query_controller_info(struct ubase_dev *udev) +{ + struct ubase_caps *dev_caps = &udev->caps.dev_caps; + struct ubase_query_controller_info_resp resp = {0}; + struct ubase_cmd_buf in, out; + int ret; + + ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_CTL_INFO, true, 0, NULL); + ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_CTL_INFO, true, + sizeof(resp), &resp); + + ret = __ubase_cmd_send_inout(udev, &in, &out); + if (ret) { + ubase_err(udev, + "failed to query controller info, ret = %d.\n", ret); + return ret; + } + + dev_caps->packet_pattern_mode = resp.packet_pattern_mode; + dev_caps->ack_queue_num = resp.ack_queue_num; + + return 0; +} + +int ubase_query_chip_info(struct ubase_dev *udev) +{ + struct ubase_caps *dev_caps = &udev->caps.dev_caps; + struct ubase_query_chip_die_cmd resp = {0}; + struct ubase_cmd_buf in, out; + int ret; + + __ubase_fill_inout_buf(&in, UBASE_OPC_QUERY_CHIP_INFO, true, 0, NULL); + __ubase_fill_inout_buf(&out, UBASE_OPC_QUERY_CHIP_INFO, true, + sizeof(resp), &resp); + ret = __ubase_cmd_send_inout(udev, &in, &out); + if (ret) { + ubase_err(udev, + "failed to query ub chip info, ret = %d.\n", ret); + return ret; + } + + dev_caps->nl_port_id = le16_to_cpu(resp.nl_port_id); + dev_caps->chip_id = le16_to_cpu(resp.chip_id); + dev_caps->die_id = le16_to_cpu(resp.die_id); + dev_caps->io_port_id = le16_to_cpu(resp.io_port_id); + dev_caps->ue_id = le16_to_cpu(resp.ue_id); + dev_caps->ub_port_logic_id = le16_to_cpu(resp.ub_port_logic_id); + dev_caps->io_port_logic_id = 
le16_to_cpu(resp.io_port_logic_id); + dev_caps->nl_id = le16_to_cpu(resp.nl_id); + + return 0; +} diff --git a/drivers/ub/ubase/ubase_hw.h b/drivers/ub/ubase/ubase_hw.h new file mode 100644 index 000000000000..b96db6d5c126 --- /dev/null +++ b/drivers/ub/ubase/ubase_hw.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2025 HiSilicon Technologies Co., Ltd. All rights reserved. + * + */ + +#ifndef __UBASE_HW_H__ +#define __UBASE_HW_H__ + +#include + +#define UBASE_DEF_CEQ_VECTOR_NUM 1 +#define UBASE_DEF_AEQ_VECTOR_NUM 1 +#define UBASE_DEF_MISC_VERCTOR_NUM 1 +#define UBASE_DEF_PUBLIC_JETTY_CNT 1024 +#define UBASE_DEF_EQE_SIZE 64 +#define UBASE_DEF_AEQ_DEPTH 512 +#define UBASE_DEF_CEQ_DEPTH 4096 + +struct ubase_caps_item { + void *p; + u32 default_val; + u8 size; + const char *name; +}; + +struct ubase_res_cmd_resp { + __le32 cap_bits[UBASE_CAP_LEN]; + __le32 rsvd[3]; + + u8 rsvd1[2]; + __le16 ceq_vector_num; + __le16 aeq_vector_num; + __le16 misc_vector_num; + __le16 aeqe_size; + __le16 ceqe_size; + __le16 udma_cqe_size; + __le16 nic_cqe_size; + __le32 aeqe_depth; + __le32 ceqe_depth; + __le32 udma_jfs_max_cnt; + __le32 udma_jfs_reserved_cnt; + + __le32 udma_jfs_depth; + __le32 udma_jfr_max_cnt; + __le32 udma_jfr_reserved_cnt; + __le32 udma_jfr_depth; + u8 nic_vl_num; + u8 rsvd2[3]; + u8 nic_vl[UBASE_MAX_REQ_VL_NUM]; + __le32 udma_jfc_max_cnt; + + __le32 udma_jfc_reserved_cnt; + __le32 udma_jfc_depth; + __le32 udma_tp_max_cnt; + __le32 udma_tp_reserved_cnt; + __le32 udma_tp_depth; + __le32 udma_tpg_max_cnt; + __le32 udma_tpg_reserved_cnt; + __le32 udma_tpg_depth; + + __le32 nic_jfs_max_cnt; + __le32 nic_jfs_reserved_cnt; + __le32 nic_jfs_depth; + __le32 nic_jfr_max_cnt; + __le32 nic_jfr_reserved_cnt; + __le32 nic_jfr_depth; + __le32 rsvd3[2]; + + __le32 rsvd4; + __le32 nic_jfc_max_cnt; + __le32 nic_jfc_reserved_cnt; + __le32 nic_jfc_depth; + __le32 nic_tp_max_cnt; + __le32 nic_tp_reserved_cnt; + __le32 nic_tp_depth; + __le32 
nic_tpg_max_cnt; + + __le32 nic_tpg_reserved_cnt; + __le32 nic_tpg_depth; + __le32 total_ue_num; + __le32 jfs_ctx_size; + __le32 jfr_ctx_size; + __le32 jfc_ctx_size; + __le32 tp_ctx_size; + __le16 rsvd_jetty_cnt; + __le16 mac_stats_num; + + __le32 ta_extdb_buf_size; + __le32 ta_timer_buf_size; + __le32 public_jetty_cnt; + __le32 tp_extdb_buf_size; + __le32 tp_timer_buf_size; + u8 port_work_mode; + u8 udma_vl_num; + u8 udma_tp_resp_vl_offset; + u8 ue_num; + __le32 port_bitmap; + u8 rsvd5[4]; + + /* include udma tp and ctp req vl */ + u8 udma_req_vl[UBASE_MAX_REQ_VL_NUM]; + __le32 udma_rc_depth; + u8 rsvd6[4]; + __le32 jtg_max_cnt; + __le32 rc_max_cnt_per_vl; + __le32 dest_addr_max_cnt; + __le32 seid_upi_max_cnt; + + __le32 tpm_max_cnt; + __le32 ccc_max_cnt; +}; + +struct ubase_query_controller_info_resp { + __le32 rsvd0[2]; + u8 packet_pattern_mode : 1; + u8 ack_queue_num : 4; + u8 rsvd1 : 3; + u8 rsvd2[15]; +}; + +struct ubase_query_chip_die_cmd { + __le16 nl_port_id; + __le16 chip_id; + __le16 die_id; + __le16 io_port_id; + __le16 ue_id; + __le16 ub_port_logic_id; + __le16 nl_id; + __le16 io_port_logic_id; +}; + +int ubase_query_dev_res(struct ubase_dev *udev); +int ubase_query_chip_info(struct ubase_dev *udev); +int ubase_query_controller_info(struct ubase_dev *udev); + +#endif diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h index 9d5c352999ec..b8aa23431003 100644 --- a/include/ub/ubase/ubase_comm_cmd.h +++ b/include/ub/ubase/ubase_comm_cmd.h @@ -29,8 +29,13 @@ enum ubase_opcode_type { /* Generic commands */ UBASE_OPC_QUERY_FW_VER = 0x0001, + UBASE_OPC_QUERY_CTL_INFO = 0x0003, + UBASE_OPC_QUERY_COMM_RSRC_PARAM = 0x0030, UBASE_OPC_QUERY_BUS_EID = 0x0047, + /* PHY commands */ + UBASE_OPC_QUERY_CHIP_INFO = 0x6201, + /* Mailbox commands */ UBASE_OPC_POST_MB = 0x7000, UBASE_OPC_QUERY_MB_ST = 0X7001, diff --git a/include/ub/ubase/ubase_comm_dev.h b/include/ub/ubase/ubase_comm_dev.h index 2060a17e400c..d8bcb9349b42 100644 --- 
a/include/ub/ubase/ubase_comm_dev.h +++ b/include/ub/ubase/ubase_comm_dev.h @@ -18,6 +18,7 @@ struct iova_slot; #define UBASE_IOVA_COMM_PFN_CNT 1 #define UBASE_MAX_DSCP (64) #define UBASE_MAX_SL_NUM (16U) +#define UBASE_MAX_REQ_VL_NUM (8U) #define UBASE_MAX_VL_NUM (16U) #if UBASE_MAX_VL_NUM < IEEE_8021QAZ_MAX_TCS #error "UBASE_MAX_VL_NUM can't less than IEEE_8021QAZ_MAX_TCS" -- Gitee From d6d63eaa720e3c8b388e7a3c2e563d086d1f384b Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Fri, 12 Sep 2025 19:39:52 +0800 Subject: [PATCH 06/10] ub: ubase: add interrupt management framework and misc handler drivers inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID41MP CVE: NA ------------------------------------------------------------ This patch implements a complete interrupt management framework for the ubase driver, primarily providing two interface functions, ubase_irq_table_init and ubase_irq_table_uninit, for centralized management of device interrupt resources. The framework supports the handling of miscellaneous interrupts (misc IRQ), including interrupt request, registration, enabling/disabling, and resource release. It integrates with the underlying hardware abstraction layer (ubus) to obtain interrupt vectors, offering devices a unified interrupt lifecycle management capability. 
Signed-off-by: Haibin Lu Signed-off-by: Xiaobo Zhang Signed-off-by: Junxin Chen Signed-off-by: Xiongchuan Zhou Signed-off-by: Fengyan Mu --- drivers/ub/ubase/ubase_dev.c | 2 +- drivers/ub/ubase/ubase_eq.c | 208 +++++++++++++++++++++++++++++++++++ drivers/ub/ubase/ubase_eq.h | 12 ++ 3 files changed, 221 insertions(+), 1 deletion(-) diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index e87a819cdd07..7e7df281940a 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -461,7 +461,7 @@ static const struct ubase_init_function ubase_init_func_map[] = { }, { "init irq table", UBASE_SUP_NO_PMU, 1, - NULL, NULL + ubase_irq_table_init, ubase_irq_table_uninit }, { "init ctrl queue", UBASE_SUP_NO_PMU, 1, diff --git a/drivers/ub/ubase/ubase_eq.c b/drivers/ub/ubase/ubase_eq.c index 98e4bfe69ef6..6d3bcb0fb404 100644 --- a/drivers/ub/ubase/ubase_eq.c +++ b/drivers/ub/ubase/ubase_eq.c @@ -4,11 +4,219 @@ * */ +#include #include #include "ubase_dev.h" #include "ubase_eq.h" +void ubase_enable_misc_vector(struct ubase_dev *udev, bool enable) +{ + ubase_write_dev(&udev->hw, UBASE_MISC_VECTOR_REG_OFFSET, + enable ? 
0 : 1); +} + +static int ubase_reg_event_handler(struct ubase_dev *udev) +{ + int ret = IRQ_HANDLED; + + ubase_enable_misc_vector(udev, false); + + ubase_enable_misc_vector(udev, true); + + return ret; +} + +static irqreturn_t ubase_misc_int_handler(int irq, void *data) +{ + struct ubase_dev *udev = (struct ubase_dev *)data; + + return IRQ_RETVAL(ubase_reg_event_handler(udev)); +} + +static int ubase_request_misc_irq(struct ubase_dev *udev) +{ + struct ubase_irq_table *irq_table = &udev->irq_table; + struct ubase_irq *irq; + int ret; + + if (ubase_ubus_irq_vector(udev->dev, 0) == -EOPNOTSUPP) + return 0; + + irq = irq_table->irqs[UBASE_MISC_IRQ_INDEX]; + snprintf(irq->name, UBASE_INT_NAME_LEN, "ubase%d-%s-%d", udev->dev_id, + "misc", 0); + ret = request_irq(irq->irqn, ubase_misc_int_handler, 0, irq->name, udev); + if (ret) { + ubase_err(udev, + "failed to request misc irq, ret = %d.\n", ret); + return ret; + } + + ubase_enable_misc_vector(udev, true); + + return ret; +} + +static void ubase_free_misc_irq(struct ubase_dev *udev) +{ + struct ubase_irq_table *irq_table = &udev->irq_table; + struct ubase_irq *irq; + + if (!irq_table->irqs) + return; + + ubase_enable_misc_vector(udev, false); + + irq = irq_table->irqs[UBASE_MISC_IRQ_INDEX]; + if (ubase_ubus_irq_vector(udev->dev, 0) != -EOPNOTSUPP) + free_irq(irq->irqn, udev); +} + +static int ubase_irq_init(struct ubase_dev *udev) +{ + struct ubase_irq_table *irq_table = &udev->irq_table; + u32 irqs_num = irq_table->irqs_num; + struct ubase_irq **irqs; + int ret; + u32 i; + + irqs = kcalloc(irqs_num, sizeof(struct ubase_irq *), GFP_KERNEL); + if (!irqs) { + ubase_err(udev, "failed to alloc irqs.\n"); + return -ENOMEM; + } + + for (i = 0; i < irqs_num; i++) { + irqs[i] = kzalloc(sizeof(struct ubase_irq), GFP_KERNEL); + if (!irqs[i]) { + ubase_err(udev, "failed to alloc ubase irq[%u].\n", i); + ret = -ENOMEM; + goto err_alloc_ubase_irq; + } + + if (ubase_ubus_irq_vector(udev->dev, 0) == -EOPNOTSUPP) + continue; + + 
irqs[i]->irqn = ubase_ubus_irq_vector(udev->dev, i); + if (irqs[i]->irqn < 0) { + ubase_err(udev, + "failed to get irq[%u] num, err irq num = %d.\n", + i, irqs[i]->irqn); + ret = irqs[i]->irqn; + kfree(irqs[i]); + goto err_alloc_ubase_irq; + } + } + irq_table->irqs = irqs; + + return 0; + +err_alloc_ubase_irq: + for (; i > 0; i--) + kfree(irqs[i - 1]); + kfree(irqs); + irq_table->irqs = NULL; + + return ret; +} + +static void ubase_irq_uninit(struct ubase_dev *udev) +{ + struct ubase_irq_table *irq_table = &udev->irq_table; + u32 i; + + if (!irq_table->irqs) + return; + + for (i = 0; i < irq_table->irqs_num; i++) + kfree(irq_table->irqs[i]); + kfree(irq_table->irqs); + irq_table->irqs = NULL; +} + +static int ubase_request_irq(struct ubase_dev *udev) +{ + int ret; + + ret = ubase_request_misc_irq(udev); + if (ret) { + ubase_err(udev, + "failed to request ubase misc irq, ret = %d.\n", ret); + return ret; + } + + return 0; +} + +static void ubase_free_irq(struct ubase_dev *udev) +{ + ubase_free_misc_irq(udev); +} + +int ubase_irq_table_init(struct ubase_dev *udev) +{ + struct ubase_irq_table *irq_table = &udev->irq_table; + int i, j, ret; + + if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) { + for (i = 0; i < UBASE_DRV_MAX; i++) { + for (j = 0; j < UBASE_EVENT_TYPE_MAX; j++) + BLOCKING_INIT_NOTIFIER_HEAD(&irq_table->nh[i][j]); + } + mutex_init(&udev->irq_table.ceq_lock); + } + + ret = ubase_ubus_irq_vectors_alloc(udev->dev); + if (ret) { + ubase_err(udev, "failed to alloc irq vectors, ret = %d.\n", + ret); + goto err_irq_res_init; + } + + ret = ubase_irq_init(udev); + if (ret) { + ubase_err(udev, "failed to init ubase irq, ret = %d.\n", ret); + goto err_irq_init; + } + + ret = ubase_request_irq(udev); + if (ret) { + ubase_err(udev, "failed to request ubase irq, ret = %d.\n", + ret); + goto err_request_irq; + } + + clear_bit(UBASE_STATE_IRQ_INVALID_B, &udev->state_bits); + + return 0; + +err_request_irq: + ubase_irq_uninit(udev); +err_irq_init: + 
ubase_ubus_irq_vectors_free(udev->dev); +err_irq_res_init: + if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) + mutex_destroy(&udev->irq_table.ceq_lock); + + return ret; +} + +void ubase_irq_table_free(struct ubase_dev *udev) +{ + if (test_and_set_bit(UBASE_STATE_IRQ_INVALID_B, &udev->state_bits)) + return; + + ubase_free_irq(udev); + ubase_irq_uninit(udev); + ubase_ubus_irq_vectors_free(udev->dev); +} + +void ubase_irq_table_uninit(struct ubase_dev *udev) +{ + ubase_irq_table_free(udev); +} + static int __ubase_event_register(struct ubase_dev *udev, struct ubase_event_nb *cb) { diff --git a/drivers/ub/ubase/ubase_eq.h b/drivers/ub/ubase/ubase_eq.h index 5602e166a82d..6d40ef2173bb 100644 --- a/drivers/ub/ubase/ubase_eq.h +++ b/drivers/ub/ubase/ubase_eq.h @@ -17,6 +17,13 @@ #define UBASE_AE_LEVEL_NUM 4 +/* Vector0 interrupt control register */ +#define UBASE_MISC_VECTOR_REG_OFFSET 0x18020 + +enum ubase_eqc_irqn { + UBASE_MISC_IRQ_INDEX, +}; + struct ubase_irq { char name[UBASE_INT_NAME_LEN]; int irqn; @@ -69,7 +76,12 @@ struct ubase_irq_table { u32 irqs_num; }; +int ubase_irq_table_init(struct ubase_dev *udev); +void ubase_irq_table_uninit(struct ubase_dev *udev); + int ubase_register_ae_event(struct ubase_dev *udev); void ubase_unregister_ae_event(struct ubase_dev *udev); +void ubase_enable_misc_vector(struct ubase_dev *udev, bool enable); + #endif -- Gitee From 9cc0f22b5d69364f95783a1766fe155b6b01493f Mon Sep 17 00:00:00 2001 From: Fengyan Mu Date: Tue, 4 Nov 2025 09:59:22 +0800 Subject: [PATCH 07/10] ub: ubase: support for async event process drivers inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID41MP CVE: NA ------------------------------------------------------------ This patch implements a complete asynchronous event queue (AEQ) management function for the ubase driver, with major enhancements including: 1. 
AEQ Core Mechanism: Added AEQ creation/destruction functionality, supporting interaction with hardware to manage EQ contexts via mailbox. 2. Event Handling Optimization: Implemented the main logic for asynchronous event handling, supporting event classification processing and an asynchronous processing mechanism based on work queues. 3. Interrupt Handling Enhancement: Expanded interrupt handlers, added AEQ-specific interrupt handling, and improved event cause identification and clearing mechanisms. 4. Hardware Interaction Support: Implemented EQ database update mechanisms and AEQ element traversal functions, using DMA-coherent memory allocation for EQ memory space. 5. Event Registration Expansion: Default registration of mailbox event callbacks, supporting event notification mechanisms for various driver types. This implementation provides efficient event handling capabilities, reliably processing various asynchronous events generated by hardware, and offers a comprehensive event notification infrastructure for upper-layer drivers. 
Signed-off-by: Xiaobo Zhang Signed-off-by: Junxin Chen Signed-off-by: Xiongchuan Zhou Signed-off-by: Fengyan Mu --- drivers/ub/ubase/ubase_eq.c | 401 +++++++++++++++++++++++++++++++++++- drivers/ub/ubase/ubase_eq.h | 97 +++++++++ 2 files changed, 495 insertions(+), 3 deletions(-) diff --git a/drivers/ub/ubase/ubase_eq.c b/drivers/ub/ubase/ubase_eq.c index 6d3bcb0fb404..4c7add6ee24b 100644 --- a/drivers/ub/ubase/ubase_eq.c +++ b/drivers/ub/ubase/ubase_eq.c @@ -7,23 +7,207 @@ #include #include +#include "ubase_cmd.h" #include "ubase_dev.h" +#include "ubase_mailbox.h" #include "ubase_eq.h" +enum ubase_eq_type { + UBASE_EQ_TYPE_AEQ, +}; + +static void ubase_update_eq_db(struct ubase_eq *eq, enum ubase_eq_type eq_type) +{ + struct ubase_eq_db eq_db = {0}; + + eq_db.eqn = eq->eqn; + eq_db.ci = eq->cons_index; + + writeq(*(__le64 *)&eq_db, eq->db_reg); +} + +static struct ubase_aeqe *ubase_next_aeqe(struct ubase_dev *udev, + struct ubase_aeq *aeq) +{ + struct ubase_eq *eq = &aeq->eq; + struct ubase_aeqe *aeqe; + + aeqe = (struct ubase_aeqe *)(eq->addr.addr + + (eq->cons_index & (eq->entries_num - 1)) * + eq->eqe_size); + + return aeqe->owner ^ !!(eq->cons_index & eq->entries_num) ? aeqe : NULL; +} + void ubase_enable_misc_vector(struct ubase_dev *udev, bool enable) { ubase_write_dev(&udev->hw, UBASE_MISC_VECTOR_REG_OFFSET, enable ? 
0 : 1); } +static unsigned long ubase_check_event_cause(struct ubase_dev *udev) +{ + unsigned long event_cause = 0; + u32 cmdq_src_reg; + + cmdq_src_reg = ubase_read_dev(&udev->hw, UBASE_VECTOR0_CMDQ_SRC_REG); + if (cmdq_src_reg & BIT(UBASE_VECTOR0_RX_CMDQ_INT_B)) + event_cause |= BIT(UBASE_ASYNC_EVENT_CRQ_B); + + return event_cause; +} + +static void ubase_clear_event_cause(struct ubase_dev *udev, + unsigned long event_cause) +{ + if (test_bit(UBASE_ASYNC_EVENT_CRQ_B, &event_cause)) + ubase_write_dev(&udev->hw, UBASE_VECTOR0_CMDQ_SRC_REG, + BIT(UBASE_VECTOR0_RX_CMDQ_INT_B)); +} + +static void ubase_clear_all_event_cause(struct ubase_dev *udev) +{ + ubase_clear_event_cause(udev, BIT(UBASE_ASYNC_EVENT_CRQ_B)); +} + +static void ubase_crq_task_schedule(struct ubase_dev *udev) +{ + if (!test_and_set_bit(UBASE_STATE_CRQ_SERVICE_SCHED, + &udev->service_task.state)) { + udev->crq_table.last_crq_scheduled = jiffies; + mod_delayed_work(udev->ubase_wq, + &udev->service_task.service_task, 0); + } +} + static int ubase_reg_event_handler(struct ubase_dev *udev) { - int ret = IRQ_HANDLED; + unsigned long event_cause; ubase_enable_misc_vector(udev, false); + event_cause = ubase_check_event_cause(udev); + if (test_bit(UBASE_ASYNC_EVENT_CRQ_B, &event_cause)) + ubase_crq_task_schedule(udev); + + ubase_clear_event_cause(udev, event_cause); ubase_enable_misc_vector(udev, true); + return event_cause ? 
IRQ_HANDLED : IRQ_NONE; +} + +static bool ubase_is_udma_event(struct ubase_dev *udev, u8 event_type, + u8 sub_type, struct ubase_aeqe *aeqe) +{ + switch (event_type) { + case UBASE_EVENT_TYPE_CHECK_TOKEN: + return true; + default: + break; + } + + return false; +} + +static bool ubase_is_comm_event(struct ubase_dev *udev, struct ubase_aeqe *aeqe) +{ + switch (aeqe->event_type) { + case UBASE_EVENT_TYPE_MB: + return true; + default: + return false; + } + + return false; +} + +static void ubase_aeq_event_handler(struct ubase_dev *udev, + struct ubase_aeqe *aeqe) +{ + struct ubase_aeq_notify_info info; + u8 event_type = aeqe->event_type; + u8 sub_type = aeqe->sub_type; + u8 idx; + + if (event_type >= UBASE_EVENT_TYPE_MAX) { + ubase_err(udev, "event type wrong, event_type = %u.\n", + event_type); + return; + } + + info.event_type = event_type; + info.sub_type = sub_type; + info.aeqe = aeqe; + + if (ubase_is_comm_event(udev, aeqe)) + idx = UBASE_DRV_UNIC; + else if (ubase_is_udma_event(udev, event_type, sub_type, aeqe)) + idx = UBASE_DRV_UDMA; + else + idx = UBASE_DRV_UNIC; + + ubase_dbg(udev, "ubase do async work, idx = %u, event_type = %u.\n", + idx, event_type); + + blocking_notifier_call_chain(&udev->irq_table.nh[idx][event_type], + event_type, (void *)&info); +} + +static void ubase_async_service_task(struct work_struct *work) +{ + struct ubase_aeq_work *aeq_work = + container_of(work, struct ubase_aeq_work, work); + struct ubase_aeqe *aeqe = &aeq_work->aeqe; + struct ubase_dev *udev = aeq_work->udev; + + ubase_aeq_event_handler(udev, aeqe); + + kfree(aeq_work); +} + +static void ubase_init_aeq_work(struct ubase_dev *udev, struct ubase_aeqe *aeqe) +{ + struct ubase_aeq_work *aeq_work; + + aeq_work = kzalloc(sizeof(struct ubase_aeq_work), GFP_ATOMIC); + if (!aeq_work) { + dev_err_ratelimited(udev->dev, "failed to alloc aeq work.\n"); + return; + } + + aeq_work->udev = udev; + memcpy(&aeq_work->aeqe, aeqe, sizeof(struct ubase_aeqe)); + INIT_WORK(&aeq_work->work, 
ubase_async_service_task); + + queue_work(udev->ubase_async_wq, &aeq_work->work); +} + +static int ubase_async_event_handler(struct ubase_dev *udev) +{ + struct ubase_aeq *aeq = &udev->irq_table.aeq; + struct ubase_eq *eq = &aeq->eq; + struct ubase_aeqe *aeqe; + int ret = IRQ_NONE; + + aeqe = ubase_next_aeqe(udev, aeq); + while (aeqe) { + dma_rmb(); + + ubase_dbg(udev, + "event_type = 0x%x, sub_type = 0x%x, owner = %u, seq_num = %u, cons_index = %u.\n", + aeqe->event_type, aeqe->sub_type, aeqe->owner, + aeqe->event.cmd.seq_num, eq->cons_index); + + ret = IRQ_HANDLED; + + ubase_init_aeq_work(udev, aeqe); + + ++aeq->eq.cons_index; + aeqe = ubase_next_aeqe(udev, aeq); + + ubase_update_eq_db(&aeq->eq, UBASE_EQ_TYPE_AEQ); + } + return ret; } @@ -34,6 +218,143 @@ static irqreturn_t ubase_misc_int_handler(int irq, void *data) return IRQ_RETVAL(ubase_reg_event_handler(udev)); } +static irqreturn_t ubase_aeq_int_handler(int irq, void *data) +{ + struct ubase_dev *udev = (struct ubase_dev *)data; + + ubase_dbg(udev, "ubase enter aeq handler.\n"); + + return IRQ_RETVAL(ubase_async_event_handler(udev)); +} + +static void ubase_construct_eq_ctx(struct ubase_eq *eq, + struct ubase_eq_ctx *ctx, u32 tid) +{ + ctx->state = eq->state; + ctx->arm_st = eq->arm_st; + ctx->eqe_size = eq->eqe_size == UBASE_DEFAULT_EQE_SIZE ? 
1 : 0; + ctx->pi = UBASE_EQ_INIT_PROD_IDX; + ctx->shift = ilog2(eq->entries_num) - UBASE_CTX_SHIFT_BASE; + ctx->eqe_coalesce_period = eq->eq_period; + ctx->ci = UBASE_EQ_INIT_CONS_IDX; + ctx->eqe_coalesce_cnt = eq->coalesce_cnt; + ctx->eqe_base_addr_l = eq->addr.dma_addr >> + UBASE_EQE_BA_L_OFFSET & UBASE_EQE_BA_L_VALID_BIT; + ctx->eqe_base_addr_h = eq->addr.dma_addr >> + UBASE_EQE_BA_H_OFFSET & UBASE_EQE_BA_H_VALID_BIT; + ctx->eqe_token_id = tid; + ctx->irq_num = eq->eqc_irqn; + ctx->pi_bypass = UBASE_EQ_INIT_PROD_IDX; + ctx->state2 = eq->state; +} + +static int ubase_fill_eq_attribute(struct ubase_dev *udev, struct ubase_eq *eq, + u32 eqn, struct ubase_irq *irq, + enum ubase_eq_type eq_type) +{ + struct ubase_eq_addr *eq_addr = &eq->addr; + + if (eq_type == UBASE_EQ_TYPE_AEQ) { + eq->eqe_size = udev->caps.dev_caps.aeqe_size; + eq->entries_num = udev->caps.dev_caps.aeqe_depth; + eq->eq_period = EQC_EQ_MAX_PERIOD_INDX; + eq->eqc_irqn = eqn + udev->caps.dev_caps.num_misc_vectors; + } + + eq->cons_index = 0; + eq->db_reg = udev->hw.mem_base.addr; + eq->eqn = eqn; + eq->state = UBASE_EQ_STAT_VALID; + eq->arm_st = UBASE_EQ_ALWAYS_ARMED; + eq->coalesce_cnt = UBASE_EQ_COALESCE_0; + eq->irqn = irq->irqn; + + eq_addr->size = eq->entries_num * eq->eqe_size; + eq_addr->addr = dma_alloc_coherent(udev->dev, eq_addr->size, + &eq_addr->dma_addr, GFP_KERNEL); + if (!eq_addr->addr) { + ubase_err(udev, "failed to alloc eqe base addr.\n"); + return -ENOMEM; + } + + return 0; +} + +static int ubase_create_eq(struct ubase_dev *udev, struct ubase_eq *eq, u32 eqn, + struct ubase_irq *irq, enum ubase_eq_type eq_type) +{ + struct ubase_cmd_mailbox *mbx; + struct ubase_mbx_attr attr; + int mbx_cmd; + int ret; + + ret = ubase_fill_eq_attribute(udev, eq, eqn, irq, eq_type); + if (ret) { + ubase_err(udev, "failed to fill eq attribute.\n"); + return ret; + } + + mbx_cmd = eq_type == UBASE_EQ_TYPE_AEQ ? 
UBASE_MB_CREATE_AEQ_CONTEXT : + UBASE_MB_CREATE_CEQ_CONTEXT; + mbx = __ubase_alloc_cmd_mailbox(udev); + if (IS_ERR_OR_NULL(mbx)) { + ubase_err(udev, "failed to alloc mailbox for create EQC.\n"); + ret = -ENOMEM; + goto err_alloc_mailbox; + } + ubase_construct_eq_ctx(eq, (struct ubase_eq_ctx *)mbx->buf, + udev->caps.dev_caps.tid); + ubase_fill_mbx_attr(&attr, eq->eqn, mbx_cmd, 0); + ret = ubase_hw_upgrade_ctx_poll(udev, &attr, mbx); + if (ret) { + ubase_err(udev, "failed to create EQC, ret = %d.\n", ret); + goto err_upgrade_ctx; + } + + __ubase_free_cmd_mailbox(udev, mbx); + + return 0; + +err_upgrade_ctx: + __ubase_free_cmd_mailbox(udev, mbx); +err_alloc_mailbox: + dma_free_coherent(udev->dev, eq->addr.size, eq->addr.addr, + eq->addr.dma_addr); + eq->addr.addr = NULL; + + return ret; +} + +static int ubase_destroy_eq(struct ubase_dev *udev, struct ubase_eq *eq, + enum ubase_eq_type eq_type) +{ + struct ubase_cmd_mailbox *mbx; + struct ubase_mbx_attr attr; + int mbx_cmd; + int ret; + + mbx_cmd = eq_type == UBASE_EQ_TYPE_AEQ ? 
UBASE_MB_DESTROY_AEQ_CONTEXT : + UBASE_MB_DESTROY_CEQ_CONTEXT; + + mbx = __ubase_alloc_cmd_mailbox(udev); + if (IS_ERR_OR_NULL(mbx)) { + ubase_err(udev, + "failed to alloc mailbox for destroy EQC.\n"); + return -ENOMEM; + } + ubase_fill_mbx_attr(&attr, eq->eqn, mbx_cmd, 0); + ret = ubase_hw_upgrade_ctx_poll(udev, &attr, mbx); + if (ret) + ubase_err(udev, "failed to destroy EQC, ret = %d.\n", ret); + + __ubase_free_cmd_mailbox(udev, mbx); + dma_free_coherent(udev->dev, eq->addr.size, eq->addr.addr, + eq->addr.dma_addr); + eq->addr.addr = NULL; + + return ret; +} + static int ubase_request_misc_irq(struct ubase_dev *udev) { struct ubase_irq_table *irq_table = &udev->irq_table; @@ -58,6 +379,39 @@ static int ubase_request_misc_irq(struct ubase_dev *udev) return ret; } +static int ubase_request_aeq_irq(struct ubase_dev *udev) +{ + struct ubase_irq_table *irq_table = &udev->irq_table; + struct ubase_aeq *aeq = &irq_table->aeq; + struct ubase_irq *irq; + int ret; + + irq = irq_table->irqs[UBASE_AEQ_IRQ_INDEX]; + snprintf(irq->name, UBASE_INT_NAME_LEN, "ubase%d-%s-%d", udev->dev_id, + "aeq", 0); + + ret = ubase_create_eq(udev, &aeq->eq, 0, irq, UBASE_EQ_TYPE_AEQ); + if (ret) { + ubase_err(udev, "failed to create aeq, ret = %d.\n", ret); + return ret; + } + + if (ubase_ubus_irq_vector(udev->dev, 0) == -EOPNOTSUPP) + return 0; + + ret = request_irq(irq->irqn, ubase_aeq_int_handler, 0, irq->name, udev); + if (ret) { + ubase_err(udev, + "failed to request aeq irq, ret = %d.\n", ret); + + if (ubase_destroy_eq(udev, &irq_table->aeq.eq, UBASE_EQ_TYPE_AEQ)) + ubase_err(udev, "failed to destroy aeq.\n"); + return ret; + } + + return 0; +} + static void ubase_free_misc_irq(struct ubase_dev *udev) { struct ubase_irq_table *irq_table = &udev->irq_table; @@ -73,6 +427,25 @@ static void ubase_free_misc_irq(struct ubase_dev *udev) free_irq(irq->irqn, udev); } +static void ubase_free_aeq_irq(struct ubase_dev *udev) +{ + struct ubase_aeq *aeq = &udev->irq_table.aeq; + + if 
(ubase_ubus_irq_vector(udev->dev, 0) != -EOPNOTSUPP) + free_irq(aeq->eq.irqn, udev); +} + +static void ubase_destroy_aeq(struct ubase_dev *udev) +{ + struct ubase_aeq *aeq = &udev->irq_table.aeq; + + if (!aeq->eq.addr.addr) + return; + + if (ubase_destroy_eq(udev, &aeq->eq, UBASE_EQ_TYPE_AEQ)) + ubase_err(udev, "failed to destroy aeq.\n"); +} + static int ubase_irq_init(struct ubase_dev *udev) { struct ubase_irq_table *irq_table = &udev->irq_table; @@ -143,14 +516,26 @@ static int ubase_request_irq(struct ubase_dev *udev) if (ret) { ubase_err(udev, "failed to request ubase misc irq, ret = %d.\n", ret); - return ret; + goto err_misc_init; + } + + ret = ubase_request_aeq_irq(udev); + if (ret) { + ubase_err(udev, + "failed to request ubase aeq irq, ret = %d.\n", ret); + goto err_aeq_init; } return 0; +err_aeq_init: + ubase_free_misc_irq(udev); +err_misc_init: + return ret; } static void ubase_free_irq(struct ubase_dev *udev) { + ubase_free_aeq_irq(udev); ubase_free_misc_irq(udev); } @@ -180,6 +565,8 @@ int ubase_irq_table_init(struct ubase_dev *udev) goto err_irq_init; } + ubase_clear_all_event_cause(udev); + ret = ubase_request_irq(udev); if (ret) { ubase_err(udev, "failed to request ubase irq, ret = %d.\n", @@ -215,6 +602,7 @@ void ubase_irq_table_free(struct ubase_dev *udev) void ubase_irq_table_uninit(struct ubase_dev *udev) { ubase_irq_table_free(udev); + ubase_destroy_aeq(udev); } static int __ubase_event_register(struct ubase_dev *udev, @@ -346,7 +734,14 @@ void ubase_unregister_ae_event(struct ubase_dev *udev) int ubase_register_ae_event(struct ubase_dev *udev) { - struct ubase_event_nb ubase_ae_nbs[UBASE_AE_LEVEL_NUM] = {}; + struct ubase_event_nb ubase_ae_nbs[UBASE_AE_LEVEL_NUM] = { + { + UBASE_DRV_UNIC, + UBASE_EVENT_TYPE_MB, + { ubase_cmd_mbx_event_cb }, + udev + } + }; struct ubase_aeq *aeq = &udev->irq_table.aeq; int i, ret; diff --git a/drivers/ub/ubase/ubase_eq.h b/drivers/ub/ubase/ubase_eq.h index 6d40ef2173bb..326b80879929 100644 --- 
a/drivers/ub/ubase/ubase_eq.h +++ b/drivers/ub/ubase/ubase_eq.h @@ -13,15 +13,40 @@ #define UBASE_MIN_IRQ_NUM 3 /* for misc aeq ceq */ +#define UBASE_EQ_ALWAYS_ARMED 0x2 +#define UBASE_EQ_COALESCE_0 0 +#define UBASE_EQ_INIT_PROD_IDX 0 +#define UBASE_EQ_INIT_CONS_IDX 0 +#define UBASE_EQ_STAT_INVALID 0 +#define UBASE_EQ_STAT_VALID 1 + +#define UBASE_CTX_SHIFT_BASE 6 +#define UBASE_DEFAULT_EQE_SIZE 64 +#define UBASE_EQE_BA_L_OFFSET 12 +#define UBASE_EQE_BA_L_VALID_BIT GENMASK(19, 0) +#define UBASE_EQE_BA_H_OFFSET 32 +#define UBASE_EQE_BA_H_VALID_BIT GENMASK(31, 0) + +#define EQC_EQ_MAX_PERIOD_INDX 4U + #define UBASE_INT_NAME_LEN 32 #define UBASE_AE_LEVEL_NUM 4 +/* Vector0 interrupt CMDQ event source register(RW) */ +#define UBASE_VECTOR0_CMDQ_SRC_REG 0x18004 /* Vector0 interrupt control register */ #define UBASE_MISC_VECTOR_REG_OFFSET 0x18020 +/* CMDQ register bits for RX event */ +#define UBASE_VECTOR0_RX_CMDQ_INT_B 1 + +enum ubase_async_event_cause_bit { + UBASE_ASYNC_EVENT_CRQ_B, +}; enum ubase_eqc_irqn { UBASE_MISC_IRQ_INDEX, + UBASE_AEQ_IRQ_INDEX, }; struct ubase_irq { @@ -29,6 +54,16 @@ struct ubase_irq { int irqn; }; +struct ubase_eq_db { + u8 eqn; + u8 rsv0; + __le16 type : 2; + __le16 rsv1 : 14; + + __le32 ci : 24; + __le32 rsv2 : 8; +}; + struct ubase_eq_addr { void *addr; dma_addr_t dma_addr; @@ -50,6 +85,62 @@ struct ubase_eq { u32 cons_index; }; +struct ubase_eq_ctx { + /* DW0 */ + u32 state : 2; + u32 arm_st : 2; + u32 eqe_size : 1; + u32 rsv0 : 3; + u32 pi : 24; + + /* DW1 */ + u32 shift : 5; + u32 eqe_coalesce_period : 3; + u32 ci : 24; + + /* DW2 */ + u32 eqe_coalesce_cnt : 10; + u32 rsv1 : 2; + u32 eqe_base_addr_l : 20; + + /* DW3 */ + u32 eqe_base_addr_h; + + /* DW4 */ + u32 eqe_token_id : 20; + u32 rsv2 : 12; + + /* DW5 */ + u32 eqe_token_value; + + /* DW6 */ + u32 irq_num : 16; + u32 rsv3_1 : 16; + + /* DW7 */ + u32 rsv3_2; + + /* DW8 */ + u32 eqn : 8; + u32 eqe_cnt : 10; + u32 rsv4 : 12; + u32 eqe_report_timer_l : 2; + + /* DW9 */ + u32 
eqe_report_timer_h; + + /* DW10 */ + u32 funid : 8; + u32 pi_bypass : 24; + + /* DW11 */ + u32 state2 : 2; + u32 rsv5_1 : 30; + + /* DW12~DW15 */ + u32 rsv5_2[4]; +}; + struct ubase_ceq { struct ubase_dev *udev; struct ubase_eq eq; @@ -76,6 +167,12 @@ struct ubase_irq_table { u32 irqs_num; }; +struct ubase_aeq_work { + struct ubase_dev *udev; + struct work_struct work; + struct ubase_aeqe aeqe; +}; + int ubase_irq_table_init(struct ubase_dev *udev); void ubase_irq_table_uninit(struct ubase_dev *udev); -- Gitee From 21b9293cf5f6721b80be259a6b5660b377f8e815 Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Fri, 12 Sep 2025 20:24:21 +0800 Subject: [PATCH 08/10] ub: ubase: support for complete event process drivers inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID41MP CVE: NA ------------------------------------------------------------ This patch implements a completion event queue (CEQ) management framework for the ubase driver, with the main features including: 1. Core Processing Mechanism: Adds a CEQ interrupt handler (ubase_ceq_int_handler) and completion event handling logic (ubase_comp_handler), supporting batch processing of completion events. 2. Lifecycle Management: Implements a complete management process for the creation, destruction, interrupt request, and release of CEQs, supporting dynamic creation of multiple CEQ vectors. 3. Interrupt Control: Provides interfaces for enabling/disabling CEQ interrupts (ubase_enable_ce_irqs/ubase_disable_ce_irqs) and sets a flag to prevent automatic interrupt enabling. 4. Resource Management: Uses device-managed memory allocation for CEQ structures, ensures thread safety through a mutex (ceq_lock), and improves error handling mechanisms. 5. Hardware Integration: Configures CEQ parameters based on device capabilities, supports interaction with hardware via mailboxes, and routes completion events to driver instances. 
This implementation provides efficient completion event handling capabilities for ubase devices, enhances the event handling framework, and complements the existing AEQ functionality. Signed-off-by: Haibin Lu Signed-off-by: Xiaobo Zhang Signed-off-by: Junxin Chen Signed-off-by: Xiongchuan Zhou --- drivers/ub/ubase/ubase_dev.c | 2 +- drivers/ub/ubase/ubase_eq.c | 218 +++++++++++++++++++++++++++++++++++ drivers/ub/ubase/ubase_eq.h | 11 ++ 3 files changed, 230 insertions(+), 1 deletion(-) diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index 7e7df281940a..26b80b517702 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -509,7 +509,7 @@ static const struct ubase_init_function ubase_init_func_map[] = { }, { "enable ce irq", UBASE_SUP_NO_PMU, 1, - NULL, NULL + ubase_enable_ce_irqs, ubase_disable_ce_irqs }, }; diff --git a/drivers/ub/ubase/ubase_eq.c b/drivers/ub/ubase/ubase_eq.c index 4c7add6ee24b..7c9c5a2a7cd6 100644 --- a/drivers/ub/ubase/ubase_eq.c +++ b/drivers/ub/ubase/ubase_eq.c @@ -14,8 +14,42 @@ enum ubase_eq_type { UBASE_EQ_TYPE_AEQ, + UBASE_EQ_TYPE_CEQ, }; +static struct ubase_ceqe *ubase_next_ceqe(struct ubase_ceq *ceq) +{ + struct ubase_eq *eq = &ceq->eq; + struct ubase_ceqe *ceqe; + + ceqe = (struct ubase_ceqe *)(ceq->eq.addr.addr + + (eq->cons_index & (eq->entries_num - 1)) * + eq->eqe_size); + + return !!u32_get_bits(ceqe->comp, UBASE_CEQ_CEQE_OWNER_BIT) ^ + !!(eq->cons_index & eq->entries_num) ? 
ceqe : NULL; +} + +static void ubase_comp_handler(struct ubase_dev *udev, u32 jfcn) +{ + struct ubase_res_caps *unic_jfc_caps = &udev->caps.unic_caps.jfc; + u32 jfcn_end = unic_jfc_caps->start_idx + unic_jfc_caps->max_cnt; + u32 jfcn_begin = unic_jfc_caps->start_idx; + struct ubase_adev *uadev; + u8 idx; + + if (jfcn_begin <= jfcn && jfcn < jfcn_end) + idx = UBASE_DRV_UNIC; + else + idx = UBASE_DRV_UDMA; + + uadev = udev->priv.uadev[idx]; + if (unlikely(!uadev)) + return; + + atomic_notifier_call_chain(&uadev->comp_nh, jfcn, (void *)&uadev->adev); +} + static void ubase_update_eq_db(struct ubase_eq *eq, enum ubase_eq_type eq_type) { struct ubase_eq_db eq_db = {0}; @@ -23,9 +57,44 @@ static void ubase_update_eq_db(struct ubase_eq *eq, enum ubase_eq_type eq_type) eq_db.eqn = eq->eqn; eq_db.ci = eq->cons_index; + if (eq_type == UBASE_EQ_TYPE_CEQ) + eq_db.type = UBASE_EQ_DB_CMD_CEQ; + writeq(*(__le64 *)&eq_db, eq->db_reg); } +static irqreturn_t ubase_ceq_int_handler(int irq, void *data) +{ +#define UBASE_CEQ_POLLING_BUDGET 128 + + struct ubase_ceq *ceq = (struct ubase_ceq *)data; + struct ubase_ceqe *ceqe = ubase_next_ceqe(ceq); + struct ubase_dev *udev = ceq->udev; + bool ceqe_found = false; + u8 cnt = 0; + u32 jfcn; + + while (cnt++ < UBASE_CEQ_POLLING_BUDGET && ceqe) { + /* Make sure we read CEQ entry after we have checked the + * ownership bit + */ + dma_rmb(); + + jfcn = u32_get_bits(ceqe->comp, UBASE_CEQE_COMP_CQN_M); + + ubase_comp_handler(udev, jfcn); + + ++ceq->eq.cons_index; + ceqe_found = true; + ceqe = ubase_next_ceqe(ceq); + } + + if (ceqe_found) + ubase_update_eq_db(&ceq->eq, UBASE_EQ_TYPE_CEQ); + + return IRQ_RETVAL(ceqe_found); +} + static struct ubase_aeqe *ubase_next_aeqe(struct ubase_dev *udev, struct ubase_aeq *aeq) { @@ -259,6 +328,12 @@ static int ubase_fill_eq_attribute(struct ubase_dev *udev, struct ubase_eq *eq, eq->entries_num = udev->caps.dev_caps.aeqe_depth; eq->eq_period = EQC_EQ_MAX_PERIOD_INDX; eq->eqc_irqn = eqn + 
udev->caps.dev_caps.num_misc_vectors; + } else { + eq->eqe_size = udev->caps.dev_caps.ceqe_size; + eq->entries_num = udev->caps.dev_caps.ceqe_depth; + eq->eqc_irqn = eqn + udev->caps.dev_caps.num_misc_vectors + + udev->caps.dev_caps.num_aeq_vectors; + eq->eq_period = EQC_EQ_MAX_PERIOD_INDX; } eq->cons_index = 0; @@ -427,6 +502,17 @@ static void ubase_free_misc_irq(struct ubase_dev *udev) free_irq(irq->irqn, udev); } +static void ubase_free_ceq_irqs(struct ubase_dev *udev) +{ + struct ubase_ceqs *ceqs = &udev->irq_table.ceqs; + u32 i; + + for (i = 0; i < ceqs->num; i++) { + if (ubase_ubus_irq_vector(udev->dev, 0) != -EOPNOTSUPP) + free_irq(ceqs->ceq[i].eq.irqn, &ceqs->ceq[i]); + } +} + static void ubase_free_aeq_irq(struct ubase_dev *udev) { struct ubase_aeq *aeq = &udev->irq_table.aeq; @@ -435,6 +521,24 @@ static void ubase_free_aeq_irq(struct ubase_dev *udev) free_irq(aeq->eq.irqn, udev); } +static void ubase_destroy_ceqs(struct ubase_dev *udev) +{ + struct ubase_ceqs *ceqs = &udev->irq_table.ceqs; + u32 i; + + if (!ceqs->ceq) + return; + + for (i = 0; i < ceqs->num; i++) { + if (ubase_destroy_eq(udev, &ceqs->ceq[i].eq, UBASE_EQ_TYPE_CEQ)) + ubase_err(udev, "failed to destroy ceq[%u].\n", i); + } + mutex_lock(&udev->irq_table.ceq_lock); + devm_kfree(udev->dev, ceqs->ceq); + ceqs->ceq = NULL; + mutex_unlock(&udev->irq_table.ceq_lock); +} + static void ubase_destroy_aeq(struct ubase_dev *udev) { struct ubase_aeq *aeq = &udev->irq_table.aeq; @@ -446,6 +550,78 @@ static void ubase_destroy_aeq(struct ubase_dev *udev) ubase_err(udev, "failed to destroy aeq.\n"); } +static int ubase_request_ceq_irq(struct ubase_dev *udev, struct ubase_ceq *ceq, + u32 index) +{ + struct ubase_irq_table *irq_table = &udev->irq_table; + struct ubase_irq *irq; + int ret; + + irq = irq_table->irqs[index + UBASE_CEQ_IRQ_INDEX]; + snprintf(irq->name, UBASE_INT_NAME_LEN, "ubase%d-%s-%u", udev->dev_id, + "ceq", index); + ceq->udev = udev; + ret = ubase_create_eq(udev, &ceq->eq, index, irq, 
UBASE_EQ_TYPE_CEQ); + if (ret) { + ubase_err(udev, + "failed to create ceq[%u], ret = %d.\n", index, ret); + return ret; + } + + if (ubase_ubus_irq_vector(udev->dev, 0) == -EOPNOTSUPP) + return 0; + + irq_set_status_flags(irq->irqn, IRQ_NOAUTOEN); + ret = request_irq(irq->irqn, ubase_ceq_int_handler, 0, irq->name, ceq); + if (ret) { + ubase_err(udev, "failed to request ceq[%u], ret = %d.\n", + index, ret); + if (ubase_destroy_eq(udev, &ceq->eq, UBASE_EQ_TYPE_CEQ)) + ubase_err(udev, "failed to destroy ceq.\n"); + } + + return ret; +} + +static int ubase_request_ceq_irqs(struct ubase_dev *udev) +{ + struct ubase_irq_table *irq_table = &udev->irq_table; + struct ubase_ceqs *ceqs = &irq_table->ceqs; + u32 ceq_irq_num, i; + int ret; + + mutex_lock(&udev->irq_table.ceq_lock); + ceq_irq_num = udev->caps.dev_caps.num_ceq_vectors; + ceqs->ceq = devm_kcalloc(udev->dev, ceq_irq_num, + sizeof(struct ubase_ceq), GFP_KERNEL); + if (!ceqs->ceq) { + mutex_unlock(&udev->irq_table.ceq_lock); + return -ENOMEM; + } + mutex_unlock(&udev->irq_table.ceq_lock); + + ceqs->num = ceq_irq_num; + + for (i = 0; i < ceq_irq_num; i++) { + ret = ubase_request_ceq_irq(udev, &ceqs->ceq[i], i); + if (ret) { + ubase_err(udev, + "failed to request ceq[%u] irq, ret = %d.\n", + i, ret); + ceqs->num = i; + goto err_alloc_ceq; + } + } + + return 0; + +err_alloc_ceq: + ubase_free_ceq_irqs(udev); + ubase_destroy_ceqs(udev); + + return ret; +} + static int ubase_irq_init(struct ubase_dev *udev) { struct ubase_irq_table *irq_table = &udev->irq_table; @@ -526,7 +702,18 @@ static int ubase_request_irq(struct ubase_dev *udev) goto err_aeq_init; } + ret = ubase_request_ceq_irqs(udev); + if (ret) { + ubase_err(udev, + "failed to request ubase ceq irqs, ret = %d.\n", ret); + goto err_ceq_init; + } + return 0; + +err_ceq_init: + ubase_free_aeq_irq(udev); + ubase_destroy_aeq(udev); err_aeq_init: ubase_free_misc_irq(udev); err_misc_init: @@ -535,6 +722,7 @@ static int ubase_request_irq(struct ubase_dev *udev) static 
void ubase_free_irq(struct ubase_dev *udev) { + ubase_free_ceq_irqs(udev); ubase_free_aeq_irq(udev); ubase_free_misc_irq(udev); } @@ -602,7 +790,37 @@ void ubase_irq_table_free(struct ubase_dev *udev) void ubase_irq_table_uninit(struct ubase_dev *udev) { ubase_irq_table_free(udev); + ubase_destroy_ceqs(udev); ubase_destroy_aeq(udev); + + if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) + mutex_destroy(&udev->irq_table.ceq_lock); +} + +void ubase_disable_ce_irqs(struct ubase_dev *udev) +{ + struct ubase_ceqs *ceqs = &udev->irq_table.ceqs; + u32 i; + + if (test_bit(UBASE_STATE_IRQ_INVALID_B, &udev->state_bits)) + return; + + for (i = 0; i < ceqs->num; i++) + disable_irq(ceqs->ceq[i].eq.irqn); +} + +int ubase_enable_ce_irqs(struct ubase_dev *udev) +{ + struct ubase_ceqs *ceqs = &udev->irq_table.ceqs; + u32 i; + + if (test_bit(UBASE_STATE_IRQ_INVALID_B, &udev->state_bits)) + return 0; + + for (i = 0; i < ceqs->num; i++) + enable_irq(ceqs->ceq[i].eq.irqn); + + return 0; } static int __ubase_event_register(struct ubase_dev *udev, diff --git a/drivers/ub/ubase/ubase_eq.h b/drivers/ub/ubase/ubase_eq.h index 326b80879929..9b7b04911e92 100644 --- a/drivers/ub/ubase/ubase_eq.h +++ b/drivers/ub/ubase/ubase_eq.h @@ -27,6 +27,9 @@ #define UBASE_EQE_BA_H_OFFSET 32 #define UBASE_EQE_BA_H_VALID_BIT GENMASK(31, 0) +#define UBASE_CEQ_CEQE_OWNER_BIT BIT(31) +#define UBASE_CEQE_COMP_CQN_M GENMASK(19, 0) +#define UBASE_EQ_DB_CMD_CEQ 0x2 #define EQC_EQ_MAX_PERIOD_INDX 4U #define UBASE_INT_NAME_LEN 32 @@ -47,6 +50,7 @@ enum ubase_async_event_cause_bit { enum ubase_eqc_irqn { UBASE_MISC_IRQ_INDEX, UBASE_AEQ_IRQ_INDEX, + UBASE_CEQ_IRQ_INDEX, }; struct ubase_irq { @@ -64,6 +68,11 @@ struct ubase_eq_db { __le32 rsv2 : 8; }; +struct ubase_ceqe { + u32 comp; + u32 rsv[15]; +}; + struct ubase_eq_addr { void *addr; dma_addr_t dma_addr; @@ -180,5 +189,7 @@ int ubase_register_ae_event(struct ubase_dev *udev); void ubase_unregister_ae_event(struct ubase_dev *udev); void 
ubase_enable_misc_vector(struct ubase_dev *udev, bool enable); +void ubase_disable_ce_irqs(struct ubase_dev *udev); +int ubase_enable_ce_irqs(struct ubase_dev *udev); #endif -- Gitee From a5587f20a0e17e90b854065efbd6f7b76d97dc9b Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Tue, 16 Sep 2025 11:38:45 +0800 Subject: [PATCH 09/10] ub: ubase: support for cdma auxiliary device drivers inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID41MP CVE: NA ------------------------------------------------------------ This patch support for cdma auxiliary device. The following steps are completed: 1.add device id and cdma auxiliary device type to the auxiliary bus. 2.add aeq and ceq notification chain process of the cdma. Signed-off-by: Zesong Li Signed-off-by: Xiaobo Zhang Signed-off-by: Xiongchuan Zhou --- drivers/ub/ubase/ubase_eq.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/ub/ubase/ubase_eq.c b/drivers/ub/ubase/ubase_eq.c index 7c9c5a2a7cd6..e82de4fa9069 100644 --- a/drivers/ub/ubase/ubase_eq.c +++ b/drivers/ub/ubase/ubase_eq.c @@ -38,7 +38,10 @@ static void ubase_comp_handler(struct ubase_dev *udev, u32 jfcn) struct ubase_adev *uadev; u8 idx; - if (jfcn_begin <= jfcn && jfcn < jfcn_end) + /* CDMA exists independently, UNIC/UDMA are distinguished by jfcn. 
*/ + if (udev->priv.uadev[UBASE_DRV_CDMA]) + idx = UBASE_DRV_CDMA; + else if (jfcn_begin <= jfcn && jfcn < jfcn_end) idx = UBASE_DRV_UNIC; else idx = UBASE_DRV_UDMA; @@ -210,6 +213,8 @@ static void ubase_aeq_event_handler(struct ubase_dev *udev, if (ubase_is_comm_event(udev, aeqe)) idx = UBASE_DRV_UNIC; + else if (ubase_dev_cdma_supported(udev)) + idx = UBASE_DRV_CDMA; else if (ubase_is_udma_event(udev, event_type, sub_type, aeqe)) idx = UBASE_DRV_UDMA; else -- Gitee From a748088829be770a59028c9b9c3f6763a8e48cf3 Mon Sep 17 00:00:00 2001 From: Xiongchuan Zhou Date: Tue, 16 Sep 2025 15:32:51 +0800 Subject: [PATCH 10/10] ub: ubase: Support for public Context memory allocation and release. drivers inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID41MP CVE: NA ------------------------------------------------------------ This patch adds comprehensive management functionality for hardware context buffers to the ubase driver, primarily implementing the following: 1. Core Initialization Mechanism: New functions `ubase_hw_init` and `ubase_hw_uninit` have been added and integrated into the device initialization process, responsible for the creation and destruction of hardware contexts. 2. Context Buffer Management: - Supports multiple context types (JFS, JFR, JFC, JTG, RC). - Implements mechanisms for context allocation, population, and release. - Utilizes XArray for efficient management of context pages. 3. Hardware Resource Configuration: - Dynamically calculates the number and size of context entries based on device capabilities. - Supports configuration of contexts through mailbox communication with hardware. - Adds resource destruction commands and waiting mechanisms. 4. Memory Management Optimization: - Uses DMA IOVA allocator to manage context buffers. - Supports pre-allocated memory (PMEM) optimization for JFS context configuration. 
This implementation provides ubase devices with complete hardware context management capabilities, laying the foundation for subsequent data transfer and queue operations. Signed-off-by: Fengyan Mu Signed-off-by: Xiongchuan Zhou --- drivers/ub/ubase/ubase_dev.c | 2 +- drivers/ub/ubase/ubase_dev.h | 15 ++ drivers/ub/ubase/ubase_hw.c | 343 ++++++++++++++++++++++++++++++ drivers/ub/ubase/ubase_hw.h | 9 + include/ub/ubase/ubase_comm_cmd.h | 1 + include/ub/ubase/ubase_comm_hw.h | 8 + 6 files changed, 377 insertions(+), 1 deletion(-) diff --git a/drivers/ub/ubase/ubase_dev.c b/drivers/ub/ubase/ubase_dev.c index 26b80b517702..c0ee8be61660 100644 --- a/drivers/ub/ubase/ubase_dev.c +++ b/drivers/ub/ubase/ubase_dev.c @@ -493,7 +493,7 @@ static const struct ubase_init_function ubase_init_func_map[] = { }, { "init hw", UBASE_SUP_NO_PMU, 1, - NULL, NULL + ubase_hw_init, ubase_hw_uninit }, { "init debugfs", UBASE_SUP_ALL, 0, diff --git a/drivers/ub/ubase/ubase_dev.h b/drivers/ub/ubase/ubase_dev.h index ee59ac41c8be..e53462115b3d 100644 --- a/drivers/ub/ubase/ubase_dev.h +++ b/drivers/ub/ubase/ubase_dev.h @@ -77,6 +77,11 @@ struct ubase_mbox_cmd { struct ubase_mbx_event_context ctx; }; +struct ubase_destroy_res_cmd { + u8 destroy_done; + u8 rsv[23]; +}; + struct ubase_dma_buf { void *addr; dma_addr_t dma_addr; @@ -281,6 +286,16 @@ static inline bool ubase_dev_eth_mac_supported(struct ubase_dev *udev) return ubase_get_cap_bit(udev, UBASE_SUPPORT_ETH_MAC_B); } +static inline u32 ubase_jfs_num(struct ubase_dev *udev) +{ + struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; + struct ubase_adev_caps *udma_caps = &udev->caps.udma_caps; + struct ubase_caps *dev_caps = &udev->caps.dev_caps; + + return unic_caps->jfs.max_cnt + udma_caps->jfs.max_cnt + + dev_caps->public_jetty_cnt + dev_caps->rsvd_jetty_cnt; +} + int ubase_adev_idx_alloc(void); void ubase_adev_idx_free(int id); diff --git a/drivers/ub/ubase/ubase_hw.c b/drivers/ub/ubase/ubase_hw.c index 11e32da647dc..332178813766 
100644 --- a/drivers/ub/ubase/ubase_hw.c +++ b/drivers/ub/ubase/ubase_hw.c @@ -4,9 +4,17 @@ * */ +#include +#include +#include +#include + #include "ubase_cmd.h" +#include "ubase_mailbox.h" #include "ubase_hw.h" +#define UBASE_CTX_REMOVE_ALL (-2) + struct ubase_dma_buf_desc { struct ubase_dma_buf *buf; u16 opc; @@ -215,6 +223,272 @@ int ubase_query_dev_res(struct ubase_dev *udev) return ubase_parse_dev_res(udev, &resp); } +static int ubase_config_ctx_buf_to_hw(struct ubase_dev *udev, + struct ubase_ctx_buf_cap *ctx_buf, + struct ubase_mbx_attr *attr) +{ + struct ubase_cmd_mailbox mailbox; + int ret; + + mailbox.dma = ctx_buf->dma_ctx_buf_ba; + ret = __ubase_hw_upgrade_ctx(udev, attr, &mailbox); + if (ret) + ubase_err(udev, + "failed to config ctx_buf to hw, cmd = 0x%x, ret = %d.\n", + attr->op, ret); + return ret; +} + +static void ubase_free_and_clear_ctx_buf(struct ubase_dev *udev, + struct ubase_ctx_buf_cap *ctx_buf) +{ + struct ubase_ctx_page *ctx_page; + size_t npage; + + if (!xa_empty(&ctx_buf->ctx_xa)) { + xa_for_each(&ctx_buf->ctx_xa, npage, ctx_page) + ubase_destroy_ctx_page(udev, ctx_page, ctx_buf); + } + dma_free_iova(ctx_buf->slot); + + ctx_buf->slot = NULL; + ctx_buf->dma_ctx_buf_ba = 0; +} + +static void ubase_cmd_ctx_buf_free(struct ubase_dev *udev, + struct ubase_ctx_buf_cap *ctx_buf) +{ + size_t size; + + if (!ctx_buf || !ctx_buf->slot) + return; + + size = ctx_buf->entry_cnt * ctx_buf->entry_size; + if (!size) + return; + + ubase_free_and_clear_ctx_buf(udev, ctx_buf); + xa_destroy(&ctx_buf->ctx_xa); +} + +static void ubase_ctx_free(struct ubase_dev *udev, + struct ubase_ctx_buf *ctx_buf, int idx) +{ + struct ubase_ctx_buf_map map[] = { + { &ctx_buf->jfs, UBASE_MB_WRITE_JFS_CONTEXT_VA }, + { &ctx_buf->jfr, UBASE_MB_WRITE_JFR_CONTEXT_VA }, + { &ctx_buf->jfc, UBASE_MB_WRITE_JFC_CONTEXT_VA }, + { &ctx_buf->jtg, UBASE_MB_WRITE_JETTY_GROUP_CONTEXT_VA }, + { &ctx_buf->rc, UBASE_MB_WRITE_RC_CONTEXT_VA }, + }; + int i, end_idx = ARRAY_SIZE(map) - 1; + + i 
= (idx == UBASE_CTX_REMOVE_ALL) ? end_idx : idx; + for (; i >= 0; i--) + ubase_cmd_ctx_buf_free(udev, map[i].ctx); +} + +static int ubase_fill_common_ctx_buf(struct ubase_dev *udev, + struct ubase_ctx_buf_cap *ctx_buf, + u32 start_pos, u32 size) +{ + struct ubase_ctx_page *ctx_page; + size_t npage; + int ret; + u32 i; + + mutex_lock(&ctx_buf->ctx_mutex); + + for (i = start_pos; i < start_pos + size; i++) { + ctx_page = (struct ubase_ctx_page *)xa_load(&ctx_buf->ctx_xa, i); + if (ctx_page) + continue; + + ret = ubase_create_ctx_page(udev, ctx_buf, &ctx_page, i); + if (ret) { + ubase_err(udev, "failed to create context page, ret = %d.\n", + ret); + goto err_fill_ctx_page; + } + + ret = xa_err(xa_store(&ctx_buf->ctx_xa, i, ctx_page, + GFP_KERNEL)); + if (ret) { + ubase_err(udev, "failed to store page, ret = %d.\n", + ret); + goto err_fill_ctx_page; + } + } + + mutex_unlock(&ctx_buf->ctx_mutex); + + return 0; + +err_fill_ctx_page: + xa_for_each(&ctx_buf->ctx_xa, npage, ctx_page) + ubase_destroy_ctx_page(udev, ctx_page, ctx_buf); + mutex_unlock(&ctx_buf->ctx_mutex); + + return ret; +} + +static int ubase_fill_ctx_inherent_buf(struct ubase_dev *udev, + struct ubase_ctx_buf_cap *ctx_buf, + struct ubase_mbx_attr *attr) +{ + struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; + u32 buf_size, page_cnt; + u32 start_pos = 0; + + switch (attr->op) { + case UBASE_MB_WRITE_JFS_CONTEXT_VA: + start_pos = udev->caps.unic_caps.jfs.start_idx >> + ctx_buf->cnt_per_page_shift; + buf_size = unic_caps->jfs.max_cnt * ctx_buf->entry_size; + break; + case UBASE_MB_WRITE_JFR_CONTEXT_VA: + buf_size = unic_caps->jfr.max_cnt * ctx_buf->entry_size; + break; + case UBASE_MB_WRITE_JFC_CONTEXT_VA: + buf_size = unic_caps->jfc.max_cnt * ctx_buf->entry_size; + break; + default: + return 0; + } + + page_cnt = DIV_ROUND_UP(buf_size, PAGE_SIZE); + return ubase_fill_common_ctx_buf(udev, ctx_buf, start_pos, page_cnt); +} + +static int ubase_alloc_and_fill_ctx_buf(struct ubase_dev *udev, + struct 
ubase_ctx_buf_cap *ctx_buf, + struct ubase_mbx_attr *attr, + size_t size) +{ + size_t sizep; + int ret; + + ctx_buf->cnt_per_page_shift = + ilog2(roundup_pow_of_two(PAGE_SIZE / ctx_buf->entry_size)); + ctx_buf->slot = dma_alloc_iova(udev->dev, size, 0, + &ctx_buf->dma_ctx_buf_ba, &sizep); + if (IS_ERR(ctx_buf->slot)) { + ubase_err(udev, + "failed to alloc iova slot, cmd = 0x%x, size = %lu.\n", + attr->op, size); + return -ENOMEM; + } + + ret = ubase_fill_ctx_inherent_buf(udev, ctx_buf, attr); + if (ret) { + ubase_err(udev, + "failed to fill inherent ctx buf, cmd = 0x%x, ret = %d.\n", + attr->op, ret); + dma_free_iova(ctx_buf->slot); + } + + return ret; +} + +static int ubase_cmd_ctx_buf_alloc(struct ubase_dev *udev, + struct ubase_ctx_buf_cap *ctx_buf, + struct ubase_mbx_attr *attr) +{ + size_t size = ctx_buf->entry_cnt * ctx_buf->entry_size; + int ret; + + if (!size) + return 0; + + xa_init(&ctx_buf->ctx_xa); + ret = ubase_alloc_and_fill_ctx_buf(udev, ctx_buf, attr, size); + if (ret) + goto err_ctx_alloc; + + ret = ubase_config_ctx_buf_to_hw(udev, ctx_buf, attr); + if (ret) + goto err_ctx_to_hw; + + return 0; + +err_ctx_to_hw: + ubase_free_and_clear_ctx_buf(udev, ctx_buf); + +err_ctx_alloc: + xa_destroy(&ctx_buf->ctx_xa); + + return ret; +} + +static int ubase_ctx_alloc(struct ubase_dev *udev, + struct ubase_ctx_buf *ctx_buf) +{ + struct ubase_ctx_buf_map map[] = { + { &ctx_buf->jfs, UBASE_MB_WRITE_JFS_CONTEXT_VA }, + { &ctx_buf->jfr, UBASE_MB_WRITE_JFR_CONTEXT_VA }, + { &ctx_buf->jfc, UBASE_MB_WRITE_JFC_CONTEXT_VA }, + { &ctx_buf->jtg, UBASE_MB_WRITE_JETTY_GROUP_CONTEXT_VA }, + { &ctx_buf->rc, UBASE_MB_WRITE_RC_CONTEXT_VA }, + }; + struct ubase_mbx_attr attr = {0}; + int ret, i; + + for (i = 0; i < ARRAY_SIZE(map); i++) { + memset(&attr, 0, sizeof(attr)); + attr.op = map[i].mb_cmd; + ret = ubase_cmd_ctx_buf_alloc(udev, map[i].ctx, &attr); + if (ret) { + ubase_err(udev, + "failed to alloc ctx buf, mb_cmd = 0x%x, ret = %d.\n", + map[i].mb_cmd, ret); + goto 
err_alloc_ctx_buf; + } + } + + return 0; + +err_alloc_ctx_buf: + ubase_ctx_free(udev, ctx_buf, --i); + return ret; +} + +static void ubase_get_ctx_entry_cnt(struct ubase_dev *udev) +{ + struct ubase_adev_caps *unic_caps = &udev->caps.unic_caps; + struct ubase_adev_caps *udma_caps = &udev->caps.udma_caps; + struct ubase_ctx_buf *ubase_ctx_buf = &udev->ctx_buf; + + ubase_ctx_buf->jfs.entry_cnt = ubase_jfs_num(udev); + ubase_ctx_buf->jfr.entry_cnt = unic_caps->jfr.max_cnt; + ubase_ctx_buf->jfc.entry_cnt = unic_caps->jfc.max_cnt; + ubase_ctx_buf->jtg.entry_cnt = udma_caps->jtg_max_cnt; + ubase_ctx_buf->rc.entry_cnt = udma_caps->rc_max_cnt; + + ubase_ctx_buf->jfr.entry_cnt += udma_caps->jfr.max_cnt; + ubase_ctx_buf->jfc.entry_cnt += udma_caps->jfc.max_cnt; +} + +static void ubase_get_ctx_entry_size(struct ubase_dev *udev) +{ + struct ubase_ctx_buf *ubase_ctx_buf = &udev->ctx_buf; + + ubase_ctx_buf->jfs.entry_size = UBASE_JFS_CTX_SIZE; + ubase_ctx_buf->jfr.entry_size = UBASE_JFR_CTX_SIZE; + ubase_ctx_buf->jfc.entry_size = UBASE_JFC_CTX_SIZE; + ubase_ctx_buf->jtg.entry_size = UBASE_JTG_CTX_SIZE; + ubase_ctx_buf->rc.entry_size = UBASE_RC_CTX_SIZE; +} + +static int ubase_ctx_buf_alloc(struct ubase_dev *udev) +{ + struct ubase_ctx_buf *ctx_buf = &udev->ctx_buf; + + ubase_get_ctx_entry_cnt(udev); + ubase_get_ctx_entry_size(udev); + + return ubase_ctx_alloc(udev, ctx_buf); +} + int ubase_query_controller_info(struct ubase_dev *udev) { struct ubase_caps *dev_caps = &udev->caps.dev_caps; @@ -267,3 +541,72 @@ int ubase_query_chip_info(struct ubase_dev *udev) return 0; } + +static void ubase_destroy_ctx_res(struct ubase_dev *udev) +{ +#define UBASE_DESTROY_RES_WAIT_TIME 20 +#define UBASE_DESTROY_RES_WAIT_COUNT 5 + + struct ubase_destroy_res_cmd resp; + struct ubase_cmd_buf in, out; + int try_cnt = 0; + int ret; + + __ubase_fill_inout_buf(&in, UBASE_OPC_DESTROY_CTX_RESOURCE, false, 0, NULL); + ret = __ubase_cmd_send_in(udev, &in); + if (ret) { + ubase_err(udev, "failed to send 
destroy resource, ret = %d.\n", + ret); + return; + } + + __ubase_fill_inout_buf(&in, UBASE_OPC_DESTROY_CTX_RESOURCE, true, 0, NULL); + __ubase_fill_inout_buf(&out, UBASE_OPC_DESTROY_CTX_RESOURCE, false, + sizeof(resp), &resp); + do { + memset(&resp, 0, sizeof(resp)); + msleep(UBASE_DESTROY_RES_WAIT_TIME); + ret = __ubase_cmd_send_inout(udev, &in, &out); + if (ret) { + ubase_err(udev, + "failed to query destroy resource, ret = %d.\n", + ret); + return; + } + + try_cnt++; + } while (!resp.destroy_done && try_cnt < UBASE_DESTROY_RES_WAIT_COUNT); + + if (!resp.destroy_done) + ubase_warn(udev, "wait ue destroy res timeout!\n"); +} + +static inline void ubase_uninit_ctx_buf(struct ubase_dev *udev) +{ + ubase_ctx_free(udev, &udev->ctx_buf, UBASE_CTX_REMOVE_ALL); +} + +int ubase_hw_init(struct ubase_dev *udev) +{ + int ret; + + ret = ubase_ctx_buf_alloc(udev); + if (ret) { + ubase_err(udev, "failed to init ctx buf, ret = %d.\n", ret); + return ret; + } + + set_bit(UBASE_STATE_CTX_READY_B, &udev->state_bits); + + return 0; +} + +void ubase_hw_uninit(struct ubase_dev *udev) +{ + clear_bit(UBASE_STATE_CTX_READY_B, &udev->state_bits); + + if (!test_bit(UBASE_STATE_RST_HANDLING_B, &udev->state_bits)) + ubase_destroy_ctx_res(udev); + + ubase_uninit_ctx_buf(udev); +} diff --git a/drivers/ub/ubase/ubase_hw.h b/drivers/ub/ubase/ubase_hw.h index b96db6d5c126..b70097555a3e 100644 --- a/drivers/ub/ubase/ubase_hw.h +++ b/drivers/ub/ubase/ubase_hw.h @@ -9,6 +9,8 @@ #include +#define UBASE_CTX_REMOVE_ALL (-2) + #define UBASE_DEF_CEQ_VECTOR_NUM 1 #define UBASE_DEF_AEQ_VECTOR_NUM 1 #define UBASE_DEF_MISC_VERCTOR_NUM 1 @@ -130,6 +132,13 @@ struct ubase_query_chip_die_cmd { __le16 io_port_logic_id; }; +struct ubase_ctx_buf_map { + struct ubase_ctx_buf_cap *ctx; + u16 mb_cmd; +}; + +int ubase_hw_init(struct ubase_dev *udev); +void ubase_hw_uninit(struct ubase_dev *udev); int ubase_query_dev_res(struct ubase_dev *udev); int ubase_query_chip_info(struct ubase_dev *udev); int 
ubase_query_controller_info(struct ubase_dev *udev); diff --git a/include/ub/ubase/ubase_comm_cmd.h b/include/ub/ubase/ubase_comm_cmd.h index b8aa23431003..38f327a5bb8c 100644 --- a/include/ub/ubase/ubase_comm_cmd.h +++ b/include/ub/ubase/ubase_comm_cmd.h @@ -43,6 +43,7 @@ enum ubase_opcode_type { /* Software commands */ UBASE_OPC_MUE_TO_UE = 0xF001, UBASE_OPC_UE_TO_MUE = 0xF002, + UBASE_OPC_DESTROY_CTX_RESOURCE = 0xF00D, }; union ubase_mbox { diff --git a/include/ub/ubase/ubase_comm_hw.h b/include/ub/ubase/ubase_comm_hw.h index e212b4a7d2bc..ba3717fb16b3 100644 --- a/include/ub/ubase/ubase_comm_hw.h +++ b/include/ub/ubase/ubase_comm_hw.h @@ -10,6 +10,14 @@ #include #include +#define UBASE_AEQ_CTX_SIZE 64 +#define UBASE_CEQ_CTX_SIZE 64 +#define UBASE_JFS_CTX_SIZE 256 +#define UBASE_JFR_CTX_SIZE 64 +#define UBASE_JFC_CTX_SIZE 128 +#define UBASE_RC_CTX_SIZE 256 +#define UBASE_JTG_CTX_SIZE 8 + #define UBASE_DESC_DATA_LEN 6 struct ubase_cmdq_desc { __le16 opcode; -- Gitee